Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 22:07:59 -05:00
Compare commits
303 Commits
.bazelrc (4 changed lines)
@@ -16,7 +16,9 @@ run --host_force_python=PY2
--experimental_sandbox_default_allow_network=false

# Use minimal protobufs at runtime
run --define ssz=minimal
run --define ssz=mainnet
test --define ssz=mainnet
build --define ssz=mainnet

# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env

@@ -11,11 +11,10 @@ build:remote-cache --strategy=Closure=standalone
build:remote-cache --strategy=Genrule=standalone

# Build results backend.
build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
build:remote-cache --bes_backend=buildeventservice.googleapis.com
build:remote-cache --bes_timeout=60s
build:remote-cache --project_id=prysmaticlabs
build:remote-cache --bes_upload_mode=fully_async
#build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
#build:remote-cache --bes_backend=buildeventservice.googleapis.com
#build:remote-cache --bes_timeout=60s
#build:remote-cache --project_id=prysmaticlabs

# Prysm specific remote-cache properties.
build:remote-cache --disk_cache=
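The `--define ssz=...` lines above select the SSZ/spec preset baked into builds, tests and runs. A hedged sketch of how this interacts with day-to-day commands (the minimal-preset invocation itself appears later in the README diff; the override behaviour is assumed from standard Bazel semantics, where command-line flags take precedence over .bazelrc defaults):

```text
# Default from .bazelrc: mainnet preset for build/test/run.
bazel build //beacon-chain:beacon-chain

# Override per invocation, e.g. for interop testing against the minimal spec preset.
bazel build --define ssz=minimal //beacon-chain:beacon-chain
```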
BUILD.bazel (18 changed lines)
@@ -31,27 +31,21 @@ alias(
alias(
name = "grpc_proto_compiler",
actual = "@io_bazel_rules_go//proto:gogofast_grpc",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)

# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
alias(
name = "grpc_nogogo_proto_compiler",
actual = "@io_bazel_rules_go//proto:go_grpc",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)

# Protobuf gRPC gateway compiler
alias(
name = "grpc_gateway_proto_compiler",
actual = "@grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
visibility = [
"//proto:__subpackages__",
],
visibility = ["//visibility:public"],
)

gometalinter(
@@ -143,3 +137,9 @@ common_files = {
),
tags = ["manual"],
) for pair in binary_targets]

toolchain(
name = "built_cmake_toolchain",
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
)
INTEROP.md (11 changed lines)
@@ -45,10 +45,9 @@ Open up two terminal windows, run:

```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--interop-num-validators 64 \
--interop-eth1data-votes
```
@@ -62,7 +61,6 @@ bazel run //validator -- --interop-num-validators 64
```

This will launch and kickstart the system with your 64 validators performing their duties accordingly.
specify which keys

### Launching from `genesis.ssz`

@@ -70,10 +68,9 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term

```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```
README.md (223 changed lines)
@@ -1,195 +1,236 @@
# Prysm: Ethereum 'Serenity' 2.0 Go Implementation
# Prysm: An Ethereum 2.0 Client Written in Go

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://github.com/ethereum/eth2.0-specs/commit/452ecf8e27c7852c7854597f2b1bb4a62b80c7ec)
[](https://github.com/ethereum/eth2.0-specs/tree/v0.9.3)
[](https://discord.gg/KSA7rPr)
[](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

This is the Core repository for Prysm, [Prysmatic Labs](https://prysmaticlabs.com)' [Go](https://golang.org/) implementation of the Ethereum protocol 2.0 (Serenity).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).

### Need assistance?
A more detailed set of installation and usage instructions as well as explanations of each component are available on our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.

**Interested in what's next?** Be sure to read our [Roadmap Reference Implementation](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md) document. This page outlines the basics of sharding as well as the various short-term milestones that we hope to achieve over the coming year.
A more detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.

### Come join the testnet!
Participation is now open to the public in our testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) for more information on the project itself or to sign up as a validator on the network.
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network.

# Table of Contents

- [Dependencies](#dependencies)
- [Installation](#installation)
- [Build Via Docker](#build-via-docker)
- [Build Via Bazel](#build-via-bazel)
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
- [Installation](#installing-prysm)
- [Build via Docker](#build-via-docker)
- [Build via Bazel](#build-via-bazel)
- [Connecting to the public testnet: running a beacon node](#connecting-to-the-testnet-running-a-beacon-node)
- [Running via Docker](#running-via-docker)
- [Running via Bazel](#running-via-bazel)
- [Staking ETH: running a validator client](#staking-eth-running-a-validator-client)
- [Activating your validator: depositing 3.2 Goerli ETH](#activating-your-validator-depositing-32-göerli-eth)
- [Starting the validator with Bazel](#starting-the-validator-with-bazel)
- [Setting up a local ETH2 development chain](#setting-up-a-local-eth2-development-chain)
- [Installation and dependencies](#installation-and-dependencies)
- [Running a local beacon node and validator client](#running-a-local-beacon-node-and-validator-client)
- [Testing Prysm](#testing-prysm)
- [Contributing](#contributing)
- [License](#license)
## Dependencies
Prysm can be installed either with Docker **(recommended method)** or using our build tool, Bazel. The below instructions include sections for performing both.

**For Docker installations:**
- The latest release of [Docker](https://docs.docker.com/install/)
Prysm can be installed either with Docker **\(recommended\)** or using our build tool, Bazel. The below instructions include sections for performing both.

**For Bazel installations:**
- The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
- A modern UNIX operating system (MacOS included)
#### **For Docker installations:**

## Installation
* The latest release of [Docker](https://docs.docker.com/install/)

#### **For Bazel installations:**

* The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
* The latest release of `cmake`
* The latest release of `git`
* A modern UNIX operating system \(macOS included\)

## Installing Prysm

### Build via Docker

1. Ensure you are running the most recent version of Docker by issuing the command:
```

```text
docker -v
```
2. To pull the Prysm images from the server, issue the following commands:
```

2. To pull the Prysm images, issue the following commands:

```text
docker pull gcr.io/prysmaticlabs/prysm/validator:latest
docker pull gcr.io/prysmaticlabs/prysm/beacon-chain:latest
```

This process will also install any related dependencies.

### Build via Bazel

1. Open a terminal window. Ensure you are running the most recent version of Bazel by issuing the command:
```

```text
bazel version
```
2. Clone this repository and enter the directory:
```

2. Clone Prysm's [main repository](https://github.com/prysmaticlabs/prysm) and enter the directory:

```text
git clone https://github.com/prysmaticlabs/prysm
cd prysm
```
3. Build both the beacon chain node implementation and the validator client:
```

3. Build both the beacon chain node and the validator client:

```text
bazel build //beacon-chain:beacon-chain
bazel build //validator:validator
```

Bazel will automatically pull and install any dependencies as well, including Go and necessary compilers.

Note that to build with the appropriate configuration for the Prysm testnet you should run:
## Connecting to the testnet: running a beacon node

```
bazel build --define ssz=minimal //beacon-chain:beacon-chain
bazel build --define ssz=minimal //validator:validator
```
Below are instructions for initialising a beacon node and connecting to the public testnet. To further understand the role that the beacon node plays in Prysm, see [this section of the documentation.](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical)

The binaries will be created in an architecture-dependent subdirectory of `bazel-bin` and this information is supplied as part of bazel's build process. For example:

```
$ bazel build --define ssz=minimal //beacon-chain:beacon-chain
...
Target //beacon-chain:beacon-chain up-to-date:
bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain
...
```

Here it can be seen the beacon chain binary has been created at `bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain`
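If you want to invoke the built binary directly rather than through `bazel run`, a hedged sketch follows (the exact output path is architecture-dependent, as noted above, and the `--help` flag is assumed to be provided by the CLI library the node links against):

```text
./bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain --help
```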
## Running an Ethereum 2.0 Beacon Node
To understand the role that both the beacon node and validator play in Prysm, see [this section of our documentation](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical).
**NOTE:** It is recommended to open up port 13000 on your local router to improve connectivity and receive more peers from the network. To do so, navigate to `192.168.0.1` in your browser and log in if required. Follow along with the interface to modify your router's firewall settings. When this task is completed, append the parameter `--p2p-host-ip=$(curl -s ident.me)` to your selected beacon startup command presented in this section to use the newly opened port, as in the sketch below.
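For example, appending the flag to the Bazel startup command shown later in this section (a sketch only; for the Docker commands you would additionally need to publish port 13000 from the container, which is an assumption not spelled out here):

```text
bazel run //beacon-chain -- --datadir=$HOME/prysm --p2p-host-ip=$(curl -s ident.me)
```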
### Running via Docker

**Docker on Linux/Mac:**
#### **Docker on Linux/macOS:**

To start your beacon node, issue the following command:

```
docker run -v $HOME/prysm-data:/data -p 4000:4000 \
```text
docker run -it -v $HOME/prysm:/data -p 4000:4000 --name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--name beacon-node \
--datadir=/data
--datadir=/data \
--init-sync-no-verify
```

You can stop the beacon node using `Ctrl+c` or with the following command:
The beacon node can be halted by either using `Ctrl+c` or with the command:

```
```text
docker stop beacon-node
```

Then it can be restarted again with
To restart the beacon node, issue the following command:

```
```text
docker start -ai beacon-node
```

If you run into issues you can always delete the container like this:
To delete a corrupted container, issue the following command:

```
```text
docker rm beacon-node
```

and re-create it again and even reset the chain database adding the parameter `--clear-db` as specified here:
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--clear-db` parameter:

```
docker run -it -v $HOME/prysm-data:/data -p 4000:4000 \
```text
docker run -it -v $HOME/prysm:/data -p 4000:4000 --name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--name beacon-node \
--datadir=/data \
--clear-db
```
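Since the container is named `beacon-node`, you can also tail its output while it runs (standard Docker CLI usage; this command is not part of the original instructions):

```text
docker logs -f beacon-node
```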
**Docker on Windows:**
#### **Docker on Windows:**

1) You will need to share the local drive you wish to mount to the container (e.g. C:).
1. Enter Docker settings (right click the tray icon)
2. Click 'Shared Drives'
3. Select a drive to share
4. Click 'Apply'
1. You will need to 'share' the local drive you wish to mount to \(e.g. C:\).
1. Enter Docker settings \(right click the tray icon\)
2. Click 'Shared Drives'
3. Select a drive to share
4. Click 'Apply'

2. You will next need to create a directory named `/prysm/` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that `C:` is your prior-selected shared Drive.
3. To run the beacon node, issue the following command:

2) You will next need to create a directory named ```/tmp/prysm-data/``` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that ```C:``` is your prior-selected shared Drive.

4) To run the beacon node, issue the following command:
```
docker run -it -v c:/tmp/prysm-data:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --clear-db
```text
docker run -it -v c:/prysm/:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --init-sync-no-verify --clear-db
```

### Running via Bazel

1) To start your Beacon Node with Bazel, issue the following command:
```
bazel run //beacon-chain -- --clear-db --datadir=/tmp/prysm-data
To start your Beacon Node with Bazel, issue the following command:

```text
bazel run //beacon-chain -- --clear-db --datadir=$HOME/prysm
```

This will sync up the Beacon Node with the latest head block in the network.
This will sync up the beacon node with the latest head block in the network.

## Staking ETH: Running a Validator Client
**NOTE:** The beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and **funds will be lost**.

Once your beacon node is up and **completely synced** (otherwise you will lose validator funds since the validator will not be able to operate), the chain will be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract to activate your validator (discussed in the section below). First though, you will need to create a *validator client* to connect to this node in order to stake and participate. Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.

### Activating Your Validator: Depositing 3.2 Goerli ETH
## Staking ETH: Running a validator client

Using your validator deposit data from the previous step, follow the instructions found on https://prylabs.net/participate to make a deposit.
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into a [validator deposit contract](how-prysm-works/validator-deposit-contract.md) in order to activate your validator \(discussed in the section below\). First though, you will need to create this validator and connect to this node to participate in consensus.

It will take a while for the nodes in the network to process your deposit, but once your node is active, the validator will begin doing its responsibility. In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, you'll start gradually losing your deposit until you are removed from the system.
Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.

### Starting the validator with Bazel
### Activating your validator: depositing 3.2 Göerli ETH

To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator](activating-a-validator.md) section of this documentation.

It will take a while for the nodes in the network to process a deposit. Once the node is active, the validator will immediately begin performing its responsibilities.

In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, a validator will start gradually losing its deposit until it is removed from the network entirely.

1. Open another terminal window. Enter your Prysm directory and run the validator by issuing the following command:
```
cd prysm
bazel run //validator
```
**Congratulations, you are now running Ethereum 2.0 Phase 0!**

## Setting up a local ETH2 development chain

This section outlines the process of setting up Prysm for local testing with other Ethereum 2.0 client implementations. See the [INTEROP.md](https://github.com/prysmaticlabs/prysm/blob/master/INTEROP.md) file for advanced configuration options. For more background information on interoperability development, see [this blog post](https://blog.ethereum.org/2019/09/19/eth2-interop-in-review/).

### Installation and dependencies

To begin setting up a local ETH2 development chain, follow the **Bazel** instructions found in the [dependencies](https://github.com/prysmaticlabs/prysm#dependencies) and [installation](https://github.com/prysmaticlabs/prysm#installation) sections respectively.

### Running a local beacon node and validator client

The example below will generate a beacon genesis state and initiate Prysm with 64 validators with the genesis time set to your machine's UNIX time.

Open up two terminal windows. In the first, issue the command:

```text
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract $(curl https://prylabs.net/contract) \
--clear-db \
--interop-num-validators 64 \
--interop-eth1data-votes
```

Wait a moment for the beacon chain to start. In the other terminal, issue the command:

```text
bazel run //validator -- --interop-num-validators 64
```

This command will kickstart the system with your 64 validators performing their duties accordingly.
## Testing Prysm

**To run the unit tests of our system**, issue the command:
```
To run the unit tests of our system, issue the command:

```text
bazel test //...
```
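To limit the run to a subset of the tree, you can pass a narrower label pattern instead of `//...` (the package path below is illustrative; any `//path/...` pattern works the same way):

```text
bazel test //beacon-chain/...
```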
**To run our linter**, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
```
To run our linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:

```text
golangci-lint run
```

## Contributing
We have put all of our contribution guidelines into [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md)! Check it out to get started.
Want to get involved? Check out our [Contribution Guide](https://prysmaticlabs.gitbook.io/prysm/getting-involved/contribution-guidelines) to learn more!

## License
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
WORKSPACE (312 changed lines)
@@ -1,3 +1,5 @@
workspace(name = "prysm")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

@@ -8,21 +10,12 @@ http_archive(
url = "https://github.com/bazelbuild/bazel-skylib/archive/0.8.0.tar.gz",
)

http_archive(
name = "io_bazel_rules_go",
sha256 = "513c12397db1bc9aa46dd62f02dd94b49a9b5d17444d49b5a04c5a89f3053c1c",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
],
)

http_archive(
name = "bazel_gazelle",
sha256 = "7fc87f4170011201b1690326e8c16c5d802836e3a0d617d8f75c3af2b23180c4",
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
],
)

@@ -35,9 +28,18 @@ http_archive(

http_archive(
name = "io_bazel_rules_docker",
sha256 = "9ff889216e28c918811b77999257d4ac001c26c1f7c7fb17a79bc28abf74182e",
strip_prefix = "rules_docker-0.10.1",
url = "https://github.com/bazelbuild/rules_docker/archive/v0.10.1.tar.gz",
# sha256 = "9ff889216e28c918811b77999257d4ac001c26c1f7c7fb17a79bc28abf74182e",
strip_prefix = "rules_docker-0.12.1",
url = "https://github.com/bazelbuild/rules_docker/archive/v0.12.1.tar.gz",
)

http_archive(
name = "io_bazel_rules_go",
sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
],
)

http_archive(
@@ -57,14 +59,15 @@ git_repository(
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
name = "com_github_gogo_protobuf",
commit = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c", # v1.2.1, as of 2019-03-03
# v1.3.0 (latest) as of 2019-10-05
commit = "0ca988a254f991240804bf9821f3450d87ccbb1b",
patch_args = ["-p1"],
patches = [
"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch",
"//third_party:com_github_gogo_protobuf-equal.patch",
],
remote = "https://github.com/gogo/protobuf",
shallow_since = "1550471403 +0200",
shallow_since = "1567336231 +0200",
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)

@@ -96,6 +99,24 @@ load(

_go_image_repos()

# Golang images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)

_go_image_repos()

# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)

_cc_image_repos()

http_archive(
name = "prysm_testnet_site",
build_file_content = """
@@ -128,9 +149,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "386335fc3b055fad37088bd821929ff684bc00bb1a74e044e4b377ebd6e88fce",
# File names are normally too long, see: https://github.com/ethereum/eth2.0-spec-tests/issues/15
url = "https://prysmaticlabs.com/uploads/v0.8.3_general_spectests.tar.gz",
sha256 = "72c6ee3c20d19736b1203f364a6eb0ddee2c173073e20bee2beccd288fdc42be",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/general.tar.gz",
)

http_archive(
@@ -145,8 +165,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7ab89a364796e3f8a9af84750c241e9c9e2170a34c1a4e160fdfa2cee5b03fb7",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.8.3/minimal.tar.gz",
sha256 = "a3cc860a3679f6f62ee57b65677a9b48a65fdebb151cdcbf50f23852632845ef",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/minimal.tar.gz",
)

http_archive(
@@ -161,8 +181,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "6274e3b77f393faf7b17cef10e93244c16316d3b7ae9c6b844501b12f432a7c3",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.8.3/mainnet.tar.gz",
sha256 = "8fc1b6220973ca30fa4ddc4ed24d66b1719abadca8bedb5e06c3bd9bc0df28e9",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/mainnet.tar.gz",
)

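When bumping one of these pinned spec-test archives, the sha256 to place in WORKSPACE can be recomputed locally before editing (standard shell tooling; this verification step is not prescribed by the diff itself, and the expected value below is simply the one shown above for the general test vectors):

```text
curl -sL https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/general.tar.gz | sha256sum
# expected: 72c6ee3c20d19736b1203f364a6eb0ddee2c173073e20bee2beccd288fdc42be
```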
http_archive(
@@ -172,6 +192,13 @@ http_archive(
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
)

http_archive(
name = "com_github_herumi_bls_eth_go_binary",
sha256 = "15a41ddb0bf7d142ebffae68337f19c16e747676cb56794c5d80dbe388ce004c",
strip_prefix = "bls-go-binary-ac038c7cb6d3185c4a46f3bca0c99ebf7b191e16",
url = "https://github.com/nisdas/bls-go-binary/archive/ac038c7cb6d3185c4a46f3bca0c99ebf7b191e16.zip",
)

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()
@@ -184,7 +211,7 @@ go_repository(

git_repository(
name = "com_google_protobuf",
commit = "09745575a923640154bcf307fba8aedff47f240a",
commit = "d09d649aea36f02c03f8396ba39a8d4db8a607e4",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1558721209 -0700",
)
@@ -193,6 +220,28 @@ load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()

# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

http_archive(
name = "rules_foreign_cc",
strip_prefix = "rules_foreign_cc-master",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/master.zip",
)

load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")

rules_foreign_cc_dependencies([
"@prysm//:built_cmake_toolchain",
])

http_archive(
name = "librdkafka",
build_file_content = all_content,
strip_prefix = "librdkafka-1.2.1",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.2.1.tar.gz"],
)

# External dependencies

go_repository(
@@ -210,7 +259,7 @@ go_repository(

go_repository(
name = "com_github_prysmaticlabs_go_ssz",
commit = "7e767fb53d02ea220428a6cc0850ee6e17d71bb1",
commit = "e24db4d9e9637cf88ee9e4a779e339a1686a84ee",
importpath = "github.com/prysmaticlabs/go-ssz",
)

@@ -590,22 +639,23 @@ go_repository(

go_repository(
name = "io_opencensus_go",
commit = "7bbec1755a8162b5923fc214a494773a701d506a", # v0.22.0
importpath = "go.opencensus.io",
sum = "h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=",
version = "v0.22.2",
)

go_repository(
name = "io_opencensus_go_contrib_exporter_jaeger",
commit = "5b8293c22f362562285c2acbc52f4a1870a47a33",
importpath = "contrib.go.opencensus.io/exporter/jaeger",
remote = "http://github.com/census-ecosystem/opencensus-go-exporter-jaeger",
vcs = "git",
sum = "h1:nhTv/Ry3lGmqbJ/JGvCjWxBl5ozRfqo86Ngz59UAlfk=",
version = "v0.2.0",
)

go_repository(
name = "org_golang_google_api",
commit = "aac82e61c0c8fe133c297b4b59316b9f481e1f0a", # v0.6.0
importpath = "google.golang.org/api",
sum = "h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE=",
version = "v0.14.0",
)

go_repository(
@@ -695,8 +745,9 @@ go_repository(
go_repository(
name = "com_github_libp2p_go_libp2p_pubsub",
build_file_proto_mode = "disable_global",
commit = "9f04364996b415168f0e0d7e9fc82272fbed4005", # v0.1.1
importpath = "github.com/libp2p/go-libp2p-pubsub",
sum = "h1:+Iz8zeI1KO6HX8cexU9g98cCGjae52Vujeg087SkuME=",
version = "v0.2.6-0.20191219233527-97846b574895",
)

go_repository(
@@ -787,8 +838,9 @@ go_repository(

go_repository(
name = "com_github_libp2p_go_libp2p_discovery",
commit = "d248d63b0af8c023307da18ad7000a12020e06f0", # v0.1.0
importpath = "github.com/libp2p/go-libp2p-discovery",
sum = "h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=",
version = "v0.2.0",
)

go_repository(
@@ -831,8 +883,9 @@ go_repository(

go_repository(
name = "com_github_google_gofuzz",
commit = "f140a6486e521aad38f5917de355cbf147cc0496", # v1.0.0
importpath = "github.com/google/gofuzz",
sum = "h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=",
version = "v1.0.0",
)

go_repository(
@@ -994,12 +1047,6 @@ go_repository(
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
)

go_repository(
name = "com_github_karlseguin_ccache",
commit = "ec06cd93a07565b373789b0078ba88fe697fddd9", # v2.0.3
importpath = "github.com/karlseguin/ccache",
)

go_repository(
name = "com_github_libp2p_go_libp2p_connmgr",
commit = "b46e9bdbcd8436b4fe4b30a53ec913c07e5e09c9", # v0.1.1
@@ -1014,7 +1061,7 @@ go_repository(

go_repository(
name = "grpc_ecosystem_grpc_gateway",
commit = "e652ba06e9067ef41c199af59b9c6c67724850d4",
commit = "da7a886035e25b2f274f89b6f3c64bf70a9f6780",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
)

@@ -1200,10 +1247,20 @@ go_repository(
importpath = "github.com/googleapis/gnostic",
)

go_repository(
name = "com_github_patrickmn_go_cache",
commit = "46f407853014144407b6c2ec7ccc76bf67958d93",
importpath = "github.com/patrickmn/go-cache",
)

go_repository(
name = "com_github_prysmaticlabs_ethereumapis",
commit = "b4ca5785e074dd8fed39f18a61aae0318b57b4b0",
commit = "87118fb893cc6f32b25793d819790fd3bcce3221",
importpath = "github.com/prysmaticlabs/ethereumapis",
patch_args = ["-p1"],
patches = [
"//third_party:com_github_prysmaticlabs_ethereumapis-tags.patch",
],
)

go_repository(
@@ -1239,13 +1296,6 @@ go_repository(
version = "v0.0.0-20161005185022-dfcf01d20ee9",
)

go_repository(
name = "com_github_kilic_bls12-381",
importpath = "github.com/kilic/bls12-381",
sum = "h1:hCD4IWWYsETkACK7U+isYppKfB/6d54sBkCDk3k+w2U=",
version = "v0.0.0-20191005202515-c798d6202457",
)

go_repository(
name = "com_github_minio_highwayhash",
importpath = "github.com/minio/highwayhash",
@@ -1259,3 +1309,167 @@ go_repository(
sum = "h1:n9HxLrNxWWtEb1cA950nuEEj3QnKbtsCJ6KjcgisNUs=",
version = "v0.0.0-20191002040644-a1355ae1e2c3",
)

go_repository(
name = "in_gopkg_confluentinc_confluent_kafka_go_v1",
importpath = "gopkg.in/confluentinc/confluent-kafka-go.v1",
patch_args = ["-p1"],
patches = ["//third_party:in_gopkg_confluentinc_confluent_kafka_go_v1.patch"],
sum = "h1:roy97m/3wj9/o8OuU3sZ5wildk30ep38k2x8nhNbKrI=",
version = "v1.1.0",
)

go_repository(
name = "com_github_naoina_toml",
importpath = "github.com/naoina/toml",
sum = "h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=",
version = "v0.1.1",
)

go_repository(
name = "com_github_elastic_gosigar",
importpath = "github.com/elastic/gosigar",
sum = "h1:GzPQ+78RaAb4J63unidA/JavQRKrB6s8IOzN6Ib59jo=",
version = "v0.10.5",
)

go_repository(
name = "in_gopkg_urfave_cli_v1",
importpath = "gopkg.in/urfave/cli.v1",
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
version = "v1.20.0",
)

go_repository(
name = "com_github_naoina_go_stringutil",
importpath = "github.com/naoina/go-stringutil",
sum = "h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=",
version = "v0.1.0",
)

go_repository(
name = "com_github_influxdata_influxdb",
importpath = "github.com/influxdata/influxdb",
sum = "h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=",
version = "v1.7.9",
)

go_repository(
name = "com_github_robertkrimen_otto",
importpath = "github.com/robertkrimen/otto",
sum = "h1:1VUlQbCfkoSGv7qP7Y+ro3ap1P1pPZxgdGVqiTVy5C4=",
version = "v0.0.0-20180617131154-15f95af6e78d",
)

go_repository(
name = "com_github_peterh_liner",
importpath = "github.com/peterh/liner",
sum = "h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=",
version = "v1.1.0",
)

go_repository(
name = "com_github_graph_gophers_graphql_go",
importpath = "github.com/graph-gophers/graphql-go",
sum = "h1:HwRCZlPXN00r58jaIPE11HXn7EvhheQrE+Cxw0vkrH0=",
version = "v0.0.0-20191031232829-adde0d0f76a3",
)

go_repository(
name = "com_github_rjeczalik_notify",
importpath = "github.com/rjeczalik/notify",
sum = "h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=",
version = "v0.9.2",
)

go_repository(
name = "com_github_mohae_deepcopy",
importpath = "github.com/mohae/deepcopy",
sum = "h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=",
version = "v0.0.0-20170929034955-c48cc78d4826",
)

go_repository(
name = "in_gopkg_olebedev_go_duktape_v3",
importpath = "gopkg.in/olebedev/go-duktape.v3",
sum = "h1:uuol9OUzSvZntY1v963NAbVd7A+PHLMz1FlCe3Lorcs=",
version = "v3.0.0-20190709231704-1e4459ed25ff",
)

go_repository(
name = "in_gopkg_sourcemap_v1",
importpath = "gopkg.in/sourcemap.v1",
sum = "h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=",
version = "v1.0.5",
)

go_repository(
name = "com_github_fatih_color",
importpath = "github.com/fatih/color",
sum = "h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=",
version = "v1.7.0",
)

go_repository(
name = "com_github_protolambda_zssz",
commit = "632f11e5e281660402bd0ac58f76090f3503def0",
importpath = "github.com/protolambda/zssz",
)

go_repository(
name = "com_github_emicklei_dot",
commit = "f4a04130244d60cef56086d2f649b4b55e9624aa",
importpath = "github.com/emicklei/dot",
)

go_repository(
name = "com_github_googleapis_gax_go_v2",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)

go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=",
version = "v0.0.0-20191027212112-611e8accdfc9",
)

go_repository(
name = "com_github_uber_jaeger_client_go",
importpath = "github.com/uber/jaeger-client-go",
sum = "h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU=",
version = "v2.20.1+incompatible",
)

go_repository(
name = "com_github_dgraph_io_ristretto",
commit = "99d1bbbf28e64530eb246be0568fc7709a35ebdd",
importpath = "github.com/dgraph-io/ristretto",
)

go_repository(
name = "com_github_cespare_xxhash",
commit = "d7df74196a9e781ede915320c11c378c1b2f3a1f",
importpath = "github.com/cespare/xxhash",
)

go_repository(
name = "com_github_ipfs_go_detect_race",
importpath = "github.com/ipfs/go-detect-race",
sum = "h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=",
version = "v0.0.1",
)

go_repository(
name = "com_github_dgraph_io_ristretto",
commit = "99d1bbbf28e64530eb246be0568fc7709a35ebdd",
importpath = "github.com/dgraph-io/ristretto",
)

go_repository(
name = "com_github_cespare_xxhash",
commit = "d7df74196a9e781ede915320c11c378c1b2f3a1f",
importpath = "github.com/cespare/xxhash",
)
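The go_repository entries above are normally kept in sync with the Go module graph by gazelle, which is declared as a dependency earlier in this WORKSPACE. A hedged sketch of that workflow (it assumes the repository exposes the conventional `//:gazelle` target, which is not shown in this diff, and the dependency path is purely illustrative):

```text
# Regenerate BUILD files, then pin or update a Go dependency in WORKSPACE.
bazel run //:gazelle
bazel run //:gazelle -- update-repos github.com/example/somedep
```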
@@ -36,6 +36,7 @@ go_image(
"main.go",
"usage.go",
],
base = "//tools:cc_image",
goarch = "amd64",
goos = "linux",
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
@@ -79,7 +80,10 @@ docker_push(
go_binary(
name = "beacon-chain",
embed = [":go_default_library"],
visibility = ["//beacon-chain:__subpackages__"],
visibility = [
"//beacon-chain:__subpackages__",
"//endtoend:__pkg__",
],
)

go_test(
@@ -7,13 +7,15 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -24,14 +26,17 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/eth/v1alpha1:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
@@ -5,13 +5,15 @@ import (
"fmt"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)

@@ -20,31 +22,33 @@ var log = logrus.WithField("prefix", "archiver")
// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
headFetcher blockchain.HeadFetcher
newHeadNotifier blockchain.NewHeadNotifier
newHeadRootChan chan [32]byte
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
headFetcher blockchain.HeadFetcher
participationFetcher blockchain.ParticipationFetcher
stateNotifier statefeed.Notifier
lastArchivedEpoch uint64
}

// Config options for the archiver service.
type Config struct {
BeaconDB db.Database
HeadFetcher blockchain.HeadFetcher
NewHeadNotifier blockchain.NewHeadNotifier
BeaconDB db.Database
HeadFetcher blockchain.HeadFetcher
ParticipationFetcher blockchain.ParticipationFetcher
StateNotifier statefeed.Notifier
}

// NewArchiverService initializes the service from configuration options.
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
headFetcher: cfg.HeadFetcher,
newHeadNotifier: cfg.NewHeadNotifier,
newHeadRootChan: make(chan [32]byte, 1),
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
headFetcher: cfg.HeadFetcher,
participationFetcher: cfg.ParticipationFetcher,
stateNotifier: cfg.StateNotifier,
}
}

@@ -66,50 +70,45 @@ func (s *Service) Status() error {
}

// We archive committee information pertaining to the head state's epoch.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState) error {
currentEpoch := helpers.SlotToEpoch(headState.Slot)
committeeCount, err := helpers.CommitteeCount(headState, currentEpoch)
if err != nil {
return errors.Wrap(err, "could not get committee count")
}
seed, err := helpers.Seed(headState, currentEpoch)
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return errors.Wrap(err, "could not generate seed")
}
startShard, err := helpers.StartShard(headState, currentEpoch)
attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return errors.Wrap(err, "could not get start shard")
return errors.Wrap(err, "could not generate seed")
}
proposerIndex, err := helpers.BeaconProposerIndex(headState)
if err != nil {
return errors.Wrap(err, "could not get beacon proposer index")

info := &pb.ArchivedCommitteeInfo{
ProposerSeed: proposerSeed[:],
AttesterSeed: attesterSeed[:],
}
info := &ethpb.ArchivedCommitteeInfo{
Seed: seed[:],
StartShard: startShard,
CommitteeCount: committeeCount,
ProposerIndex: proposerIndex,
}
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, currentEpoch, info); err != nil {
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
return errors.Wrap(err, "could not archive committee info")
}
return nil
}

// We archive active validator set changes that happened during the epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState) error {
activations := validators.ActivatedValidatorIndices(headState)
slashings := validators.SlashedValidatorIndices(headState)
exited, err := validators.ExitedValidatorIndices(headState)
// We archive active validator set changes that happened during the previous epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
prevEpoch := epoch - 1
activations := validators.ActivatedValidatorIndices(prevEpoch, headState.Validators)
slashings := validators.SlashedValidatorIndices(prevEpoch, headState.Validators)
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
exited, err := validators.ExitedValidatorIndices(prevEpoch, headState.Validators, activeValidatorCount)
if err != nil {
return errors.Wrap(err, "could not determine exited validator indices")
}
activeSetChanges := &ethpb.ArchivedActiveSetChanges{
activeSetChanges := &pb.ArchivedActiveSetChanges{
Activated: activations,
Exited: exited,
Slashed: slashings,
}
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, helpers.CurrentEpoch(headState), activeSetChanges); err != nil {
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
return errors.Wrap(err, "could not archive active validator set changes")
}
return nil
@@ -117,60 +116,78 @@ func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.Bea

// We compute participation metrics by first retrieving the head state and
// matching validator attestations during the epoch.
func (s *Service) archiveParticipation(ctx context.Context, headState *pb.BeaconState) error {
participation, err := epoch.ComputeValidatorParticipation(headState)
if err != nil {
return errors.Wrap(err, "could not compute participation")
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
p := s.participationFetcher.Participation(epoch)
participation := &ethpb.ValidatorParticipation{}
if p != nil {
participation = &ethpb.ValidatorParticipation{
EligibleEther: p.PrevEpoch,
VotedEther: p.PrevEpochTargetAttesters,
GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
}
}
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, helpers.SlotToEpoch(headState.Slot), participation)
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
}
// We archive validator balances and active indices.
|
||||
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState) error {
|
||||
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
|
||||
balances := headState.Balances
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
if err := s.beaconDB.SaveArchivedBalances(ctx, currentEpoch, balances); err != nil {
|
||||
if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
|
||||
return errors.Wrap(err, "could not archive balances")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) run(ctx context.Context) {
|
||||
sub := s.newHeadNotifier.HeadUpdatedFeed().Subscribe(s.newHeadRootChan)
|
||||
defer sub.Unsubscribe()
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case r := <-s.newHeadRootChan:
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", r)).Debug("New chain head event")
|
||||
headState := s.headFetcher.HeadState()
|
||||
if !helpers.IsEpochEnd(headState.Slot) {
|
||||
continue
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.BlockProcessed {
|
||||
data := event.Data.(*statefeed.BlockProcessedData)
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
|
||||
headState, err := s.headFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Head state is not available")
|
||||
continue
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
if !helpers.IsEpochEnd(headState.Slot) && currentEpoch <= s.lastArchivedEpoch {
|
||||
continue
|
||||
}
|
||||
epochToArchive := currentEpoch
|
||||
if !helpers.IsEpochEnd(headState.Slot) {
|
||||
epochToArchive--
|
||||
}
|
||||
if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive committee info")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive active validator set changes")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator participation")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveBalances(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator balances and active indices")
|
||||
continue
|
||||
}
|
||||
log.WithField(
|
||||
"epoch",
|
||||
epochToArchive,
|
||||
).Debug("Successfully archived beacon chain data during epoch")
|
||||
s.lastArchivedEpoch = epochToArchive
|
||||
}
|
||||
if err := s.archiveCommitteeInfo(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive committee info")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveActiveSetChanges(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive active validator set changes")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveParticipation(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator participation")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveBalances(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator balances and active indices")
|
||||
continue
|
||||
}
|
||||
log.WithField(
|
||||
"epoch",
|
||||
helpers.CurrentEpoch(headState),
|
||||
).Debug("Successfully archived beacon chain data during epoch")
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-sub.Err():
|
||||
log.WithError(err).Error("Subscription to new chain head notifier failed")
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state feed notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,13 +8,16 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -27,17 +30,24 @@ func init() {
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
}
|
||||
|
||||
func TestArchiverService_ReceivesNewChainHeadEvent(t *testing.T) {
|
||||
func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: 1},
|
||||
}
|
||||
headRoot := [32]byte{1, 2, 3}
|
||||
triggerNewHeadEvent(t, svc, headRoot)
|
||||
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", headRoot))
|
||||
testutil.AssertLogsContain(t, hook, "New chain head event")
|
||||
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
}
|
||||
|
||||
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
|
||||
@@ -46,20 +56,77 @@ func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
// The head state is NOT an epoch end.
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 3},
|
||||
State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 2},
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
// The context should have been canceled.
|
||||
if svc.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "New chain head event")
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
// The service should ONLY log any archival logs if we receive a
|
||||
// head slot that is an epoch end.
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
svc.run(svc.ctx)
|
||||
<-exitRoutine
|
||||
}()
|
||||
|
||||
// Send out an event every slot, skipping the end slot of the epoch.
|
||||
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
|
||||
headState.Slot = i
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
if helpers.IsEpochEnd(i) {
|
||||
continue
|
||||
}
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = svc.stateNotifier.StateFeed().Send(event)
|
||||
}
|
||||
}
|
||||
if err := svc.Stop(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exitRoutine <- true
|
||||
|
||||
// The context should have been canceled.
|
||||
if svc.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
// Even though there was a skip slot, we should still be able to archive
|
||||
// upon the next block event afterwards.
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
@@ -69,9 +136,17 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
attestedBalance := uint64(1)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
wanted := &ethpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
@@ -85,7 +160,7 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
}
|
||||
|
||||
if !proto.Equal(wanted, retrieved) {
|
||||
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch, wanted, retrieved)
|
||||
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
@@ -99,7 +174,14 @@ func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
|
||||
if err != nil {
|
||||
@@ -125,30 +207,27 @@ func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
startShard, err := helpers.StartShard(headState, currentEpoch)
|
||||
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
committeeCount, err := helpers.CommitteeCount(headState, currentEpoch)
|
||||
attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
seed, err := helpers.Seed(headState, currentEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
propIdx, err := helpers.BeaconProposerIndex(headState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted := &ethpb.ArchivedCommitteeInfo{
|
||||
Seed: seed[:],
|
||||
StartShard: startShard,
|
||||
CommitteeCount: committeeCount,
|
||||
ProposerIndex: propIdx,
|
||||
wanted := &pb.ArchivedCommitteeInfo{
|
||||
ProposerSeed: proposerSeed[:],
|
||||
AttesterSeed: attesterSeed[:],
|
||||
}
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
|
||||
@@ -175,16 +254,26 @@ func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
delayedActEpoch := helpers.DelayedActivationExitEpoch(currentEpoch)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
delayedActEpoch := helpers.DelayedActivationExitEpoch(prevEpoch)
|
||||
headState.Validators[4].ActivationEpoch = delayedActEpoch
|
||||
headState.Validators[5].ActivationEpoch = delayedActEpoch
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
|
||||
t.Errorf("Wanted indices 4 5 activated, received %v", retrieved.Activated)
|
||||
}
|
||||
@@ -200,15 +289,25 @@ func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
headState.Validators[95].Slashed = true
|
||||
headState.Validators[96].Slashed = true
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
|
||||
t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
|
||||
}
|
||||
@@ -224,19 +323,28 @@ func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
headState.Validators[95].ExitEpoch = currentEpoch + 1
|
||||
headState.Validators[95].WithdrawableEpoch = currentEpoch + 1 + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
headState.Validators[95].ExitEpoch = prevEpoch
|
||||
headState.Validators[95].WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
|
||||
t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
@@ -251,14 +359,7 @@ func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Crosslink: &ethpb.Crosslink{Shard: 0}, Target: &ethpb.Checkpoint{}}}}
|
||||
var crosslinks []*ethpb.Crosslink
|
||||
for i := uint64(0); i < params.BeaconConfig().ShardCount; i++ {
|
||||
crosslinks = append(crosslinks, &ethpb.Crosslink{
|
||||
StartEpoch: 0,
|
||||
DataRoot: []byte{'A'},
|
||||
})
|
||||
}
|
||||
atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}}}
|
||||
|
||||
// We initialize a head state that has attestations from participated
|
||||
// validators in a simulated fashion.
|
||||
@@ -269,9 +370,6 @@ func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
BlockRoots: make([][]byte, 128),
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
ActiveIndexRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CompactCommitteesRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentCrosslinks: crosslinks,
|
||||
CurrentEpochAttestations: atts,
|
||||
FinalizedCheckpoint: &ethpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
@@ -282,23 +380,30 @@ func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
func setupService(t *testing.T) (*Service, db.Database) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
validatorCount := uint64(100)
|
||||
totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
|
||||
mockChainService := &mock.ChainService{}
|
||||
return &Service{
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
newHeadRootChan: make(chan [32]byte, 0),
|
||||
newHeadNotifier: &mock.ChainService{},
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
stateNotifier: mockChainService.StateNotifier(),
|
||||
participationFetcher: &mock.ChainService{
|
||||
Balance: &precompute.Balance{PrevEpoch: totalBalance, PrevEpochTargetAttesters: 1}},
|
||||
}, beaconDB
|
||||
}
|
||||
|
||||
func triggerNewHeadEvent(t *testing.T, svc *Service, headRoot [32]byte) {
|
||||
func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
svc.run(svc.ctx)
|
||||
<-exitRoutine
|
||||
}()
|
||||
|
||||
svc.newHeadRootChan <- headRoot
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = svc.stateNotifier.StateFeed().Send(event)
|
||||
}
|
||||
if err := svc.Stop(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -17,23 +17,26 @@ go_library(
|
||||
"//beacon-chain/blockchain/forkchoice:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -68,15 +71,16 @@ go_test(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
@@ -103,7 +107,6 @@ go_test(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
@@ -112,6 +115,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
@@ -27,8 +31,10 @@ type GenesisTimeFetcher interface {
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() uint64
|
||||
HeadRoot() []byte
|
||||
HeadBlock() *ethpb.BeaconBlock
|
||||
HeadState() *pb.BeaconState
|
||||
HeadBlock() *ethpb.SignedBeaconBlock
|
||||
HeadState(ctx context.Context) (*pb.BeaconState, error)
|
||||
HeadValidatorsIndices(epoch uint64) ([]uint64, error)
|
||||
HeadSeed(epoch uint64) ([32]byte, error)
|
||||
}
|
||||
|
||||
// CanonicalRootFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -43,19 +49,62 @@ type ForkFetcher interface {
|
||||
}
|
||||
|
||||
// FinalizationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves finalization related data.
|
||||
// directly retrieves finalization and justification related data.
|
||||
type FinalizationFetcher interface {
|
||||
FinalizedCheckpt() *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckpt() *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckpt() *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint tracked in fork choice service.
|
||||
// ParticipationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves validator participation related data.
|
||||
type ParticipationFetcher interface {
|
||||
Participation(epoch uint64) *precompute.Balance
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.forkChoiceStore.FinalizedCheckpt()
if cp != nil {
return cp
if s.headState == nil || s.headState.FinalizedCheckpoint == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}

return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
// If head state exists but there hasn't been a finalized check point,
// the check point's root should refer to genesis block root.
if bytes.Equal(s.headState.FinalizedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}

return s.headState.FinalizedCheckpoint
}

// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
if s.headState == nil || s.headState.CurrentJustifiedCheckpoint == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}

// If head state exists but there hasn't been a justified check point,
// the check point root should refer to genesis block root.
if bytes.Equal(s.headState.CurrentJustifiedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}

return s.headState.CurrentJustifiedCheckpoint
}

// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
if s.headState == nil || s.headState.PreviousJustifiedCheckpoint == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}

// If head state exists but there hasn't been a justified check point,
// the check point root should refer to genesis block root.
if bytes.Equal(s.headState.PreviousJustifiedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
return &ethpb.Checkpoint{Root: s.genesisRoot[:]}
}

return s.headState.PreviousJustifiedCheckpoint
}

// HeadSlot returns the slot of the head of the chain.
|
||||
@@ -80,19 +129,42 @@ func (s *Service) HeadRoot() []byte {
|
||||
}
|
||||
|
||||
// HeadBlock returns the head block of the chain.
|
||||
func (s *Service) HeadBlock() *ethpb.BeaconBlock {
|
||||
func (s *Service) HeadBlock() *ethpb.SignedBeaconBlock {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return proto.Clone(s.headBlock).(*ethpb.BeaconBlock)
|
||||
return proto.Clone(s.headBlock).(*ethpb.SignedBeaconBlock)
|
||||
}
|
||||
|
||||
// HeadState returns the head state of the chain.
|
||||
func (s *Service) HeadState() *pb.BeaconState {
|
||||
// If the head state is nil from service struct,
|
||||
// it will attempt to get from DB and error if nil again.
|
||||
func (s *Service) HeadState(ctx context.Context) (*pb.BeaconState, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return proto.Clone(s.headState).(*pb.BeaconState)
|
||||
if s.headState == nil {
|
||||
return s.beaconDB.HeadState(ctx)
|
||||
}
|
||||
|
||||
return proto.Clone(s.headState).(*pb.BeaconState), nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if s.headState == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState, epoch)
|
||||
}
|
||||
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
if s.headState == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
return helpers.Seed(s.headState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// CanonicalRoot returns the canonical root of a given slot.
|
||||
@@ -110,5 +182,19 @@ func (s *Service) GenesisTime() time.Time {
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
if s.headState == nil {
|
||||
return &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
}
|
||||
return proto.Clone(s.headState.Fork).(*pb.Fork)
|
||||
}
|
||||
|
||||
// Participation returns the participation stats of a given epoch.
|
||||
func (s *Service) Participation(epoch uint64) *precompute.Balance {
|
||||
s.epochParticipationLock.RLock()
|
||||
defer s.epochParticipationLock.RUnlock()
|
||||
|
||||
return s.epochParticipation[epoch]
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
@@ -18,7 +18,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
&ethpb.BeaconBlock{Slot: 777},
|
||||
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
@@ -35,7 +35,7 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
&ethpb.BeaconBlock{Slot: 777},
|
||||
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
@@ -52,7 +52,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
&ethpb.BeaconBlock{Slot: 777},
|
||||
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
@@ -69,9 +69,9 @@ func TestHeadState_DataRace(t *testing.T) {
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
&ethpb.BeaconBlock{Slot: 777},
|
||||
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadState()
|
||||
s.HeadState(context.Background())
|
||||
}
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
@@ -19,14 +20,19 @@ var _ = GenesisTimeFetcher(&Service{})
|
||||
var _ = ForkFetcher(&Service{})
|
||||
|
||||
func TestFinalizedCheckpt_Nil(t *testing.T) {
|
||||
c := setupBeaconChain(t, nil)
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState, _ = testutil.DeterministicGenesisState(t, 1)
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
c := setupBeaconChain(t, nil)
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
if !bytes.Equal(c.HeadRoot(), params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
@@ -35,16 +41,81 @@ func TestHeadRoot_Nil(t *testing.T) {
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
cp := &ethpb.Checkpoint{Epoch: 5}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{FinalizedCheckpoint: cp}
|
||||
|
||||
if err := c.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
if c.FinalizedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Finalized epoch at genesis should be %d, got: %d", cp.Epoch, c.FinalizedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
if c.FinalizedCheckpt().Epoch != 0 {
|
||||
t.Errorf("Finalized epoch at genesis should be 0, got: %d", c.FinalizedCheckpt().Epoch)
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{FinalizedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'A'}
|
||||
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.FinalizedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := &ethpb.Checkpoint{Epoch: 6}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{CurrentJustifiedCheckpoint: cp}
|
||||
|
||||
if c.CurrentJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Current Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.CurrentJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{CurrentJustifiedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'B'}
|
||||
|
||||
if !bytes.Equal(c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := &ethpb.Checkpoint{Epoch: 7}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{PreviousJustifiedCheckpoint: cp}
|
||||
|
||||
if c.PreviousJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Previous Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.PreviousJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{PreviousJustifiedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'C'}
|
||||
|
||||
if !bytes.Equal(c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +137,7 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b := &ethpb.BeaconBlock{Slot: 1}
|
||||
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
|
||||
c := &Service{headBlock: b}
|
||||
if !reflect.DeepEqual(b, c.HeadBlock()) {
|
||||
t.Error("incorrect head block received")
|
||||
@@ -76,7 +147,11 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s := &pb.BeaconState{Slot: 2}
|
||||
c := &Service{headState: s}
|
||||
if !reflect.DeepEqual(s, c.HeadState()) {
|
||||
headState, err := c.HeadState(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, headState) {
|
||||
t.Error("incorrect head state received")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,20 +15,23 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -56,11 +59,12 @@ go_test(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_yaml_v2//:go_default_library",
|
||||
|
||||
@@ -4,10 +4,10 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
@@ -18,7 +18,7 @@ func BenchmarkForkChoiceTree1(b *testing.B) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -50,17 +50,11 @@ func BenchmarkForkChoiceTree1(b *testing.B) {
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 256:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
|
||||
case i > 768:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,9 +104,7 @@ func BenchmarkForkChoiceTree2(b *testing.B) {
|
||||
// Spread out the votes evenly for all the leaf nodes. 8 to 15
|
||||
nodeIndex := 8
|
||||
for i := 0; i < len(validators); i++ {
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[nodeIndex]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[nodeIndex]}
|
||||
if i%155 == 0 {
|
||||
nodeIndex++
|
||||
}
|
||||
@@ -163,9 +155,7 @@ func BenchmarkForkChoiceTree3(b *testing.B) {
|
||||
|
||||
// All validators vote on the same head
|
||||
for i := 0; i < len(validators); i++ {
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
@@ -4,6 +4,6 @@ Sub-Tree) algorithm as the Ethereum Serenity beacon chain fork choice rule. This
|
||||
properly detect the canonical chain based on validator votes even in the presence of high network
|
||||
latency, network partitions, and many conflicting blocks. To read more about fork choice, read the
|
||||
official accompanying document:
|
||||
https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_fork-choice.md
|
||||
https://github.com/ethereum/eth2.0-specs/blob/v0.9.0/specs/core/0_fork-choice.md
|
||||
*/
|
||||
package forkchoice
|
||||
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
@@ -39,6 +39,8 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
var c *Config
|
||||
err = yaml.Unmarshal(yamlFile, &c)
|
||||
|
||||
params.UseMainnetConfig()
|
||||
|
||||
for _, test := range c.TestCases {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
@@ -49,10 +51,10 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
// genesis block condition
|
||||
if blk.ID == blk.Parent {
|
||||
b := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
if err := db.SaveBlock(ctx, b); err != nil {
|
||||
if err := db.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -66,18 +68,23 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := &ethpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}
|
||||
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}}
|
||||
if err := db.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
root, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blksRoot[slot] = root[:]
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
// Assign validator votes to the blocks as weights.
|
||||
count := 0
|
||||
for blk, votes := range test.Weights {
|
||||
@@ -87,14 +94,11 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
}
|
||||
max := count + votes
|
||||
for i := count; i < max; i++ {
|
||||
if err := db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: blksRoot[slot]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: blksRoot[slot]}
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
@@ -102,12 +106,10 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
|
||||
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = blksRoot[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
|
||||
if err := store.GenesisStore(ctx, &ethpb.Checkpoint{Root: blksRoot[0]}, &ethpb.Checkpoint{Root: blksRoot[0]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -133,8 +135,6 @@ func TestGetHeadFromYaml(t *testing.T) {
|
||||
t.Errorf("wanted root %#x, got root %#x", wantedHead, head)
|
||||
}
|
||||
|
||||
helpers.ClearAllCaches()
|
||||
testDB.TeardownDB(t, db)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package forkchoice
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -17,6 +18,14 @@ var (
|
||||
Name: "beacon_finalized_root",
|
||||
Help: "Last finalized root of the processed state",
|
||||
})
|
||||
cacheFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "cache_finalized_epoch",
|
||||
Help: "Last cached finalized epoch",
|
||||
})
|
||||
cacheFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "cache_finalized_root",
|
||||
Help: "Last cached finalized root",
|
||||
})
|
||||
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_epoch",
|
||||
Help: "Current justified epoch of the processed state",
|
||||
@@ -33,46 +42,98 @@ var (
|
||||
Name: "beacon_previous_justified_root",
|
||||
Help: "Previous justified root of the processed state",
|
||||
})
|
||||
activeValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_active_validators",
|
||||
Help: "Total number of active validators",
|
||||
sigFailsToVerify = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "att_signature_failed_to_verify_with_cache",
|
||||
Help: "Number of attestation signatures that failed to verify with cache on, but succeeded without cache",
|
||||
})
|
||||
slashedValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_slashed_validators",
|
||||
Help: "Total slashed validators",
|
||||
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validator_count",
|
||||
Help: "The total number of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
validatorsBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_balance",
|
||||
Help: "The total balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
validatorsEffectiveBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_effective_balance",
|
||||
Help: "The total effective balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
currentEth1DataDepositCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "current_eth1_data_deposit_count",
|
||||
Help: "The current eth1 deposit count in the last processed state eth1data field.",
|
||||
})
|
||||
withdrawnValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_withdrawn_validators",
|
||||
Help: "Total withdrawn validators",
|
||||
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_eligible_balances",
|
||||
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
|
||||
})
|
||||
totalValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_validators",
|
||||
Help: "Number of status=pending|active|exited|withdrawable validators in current epoch",
|
||||
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_voted_target_balances",
|
||||
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
|
||||
})
|
||||
)
|
||||
|
||||
func reportEpochMetrics(state *pb.BeaconState) {
|
||||
currentEpoch := state.Slot / params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Validator counts
|
||||
var active float64
|
||||
var slashed float64
|
||||
var withdrawn float64
|
||||
for _, v := range state.Validators {
|
||||
if v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch {
|
||||
active++
|
||||
// Validator instances
|
||||
pendingInstances := 0
|
||||
activeInstances := 0
|
||||
slashingInstances := 0
|
||||
slashedInstances := 0
|
||||
exitingInstances := 0
|
||||
exitedInstances := 0
|
||||
// Validator balances
|
||||
pendingBalance := uint64(0)
|
||||
activeBalance := uint64(0)
|
||||
activeEffectiveBalance := uint64(0)
|
||||
exitingBalance := uint64(0)
|
||||
exitingEffectiveBalance := uint64(0)
|
||||
slashingBalance := uint64(0)
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range state.Validators {
|
||||
if validator.Slashed {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
slashingInstances++
|
||||
slashingBalance += state.Balances[i]
|
||||
slashingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
slashedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if v.Slashed {
|
||||
slashed++
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
exitingInstances++
|
||||
exitingBalance += state.Balances[i]
|
||||
exitingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
exitedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if currentEpoch >= v.ExitEpoch {
|
||||
withdrawn++
|
||||
if currentEpoch < validator.ActivationEpoch {
|
||||
pendingInstances++
|
||||
pendingBalance += state.Balances[i]
|
||||
continue
|
||||
}
|
||||
activeInstances++
|
||||
activeBalance += state.Balances[i]
|
||||
activeEffectiveBalance += validator.EffectiveBalance
|
||||
}
|
||||
activeValidatorsGauge.Set(active)
|
||||
slashedValidatorsGauge.Set(slashed)
|
||||
withdrawnValidatorsGauge.Set(withdrawn)
|
||||
totalValidatorsGauge.Set(float64(len(state.Validators)))
|
||||
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
|
||||
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
|
||||
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
|
||||
validatorsCount.WithLabelValues("Exited").Set(float64(exitedInstances))
|
||||
validatorsCount.WithLabelValues("Slashing").Set(float64(slashingInstances))
|
||||
validatorsCount.WithLabelValues("Slashed").Set(float64(slashedInstances))
|
||||
validatorsBalance.WithLabelValues("Pending").Set(float64(pendingBalance))
|
||||
validatorsBalance.WithLabelValues("Active").Set(float64(activeBalance))
|
||||
validatorsBalance.WithLabelValues("Exiting").Set(float64(exitingBalance))
|
||||
validatorsBalance.WithLabelValues("Slashing").Set(float64(slashingBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Active").Set(float64(activeEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Exiting").Set(float64(exitingEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))
|
||||
|
||||
// Last justified slot
|
||||
if state.CurrentJustifiedCheckpoint != nil {
|
||||
@@ -89,4 +150,12 @@ func reportEpochMetrics(state *pb.BeaconState) {
|
||||
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpoint.Epoch))
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint.Root)))
|
||||
}
|
||||
if state.Eth1Data != nil {
|
||||
currentEth1DataDepositCount.Set(float64(state.Eth1Data.DepositCount))
|
||||
}
|
||||
|
||||
if precompute.Balances != nil {
|
||||
totalEligibleBalances.Set(float64(precompute.Balances.PrevEpoch))
|
||||
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttesters))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,132 +7,144 @@ import (
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
|
||||
// beacon database.
|
||||
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
|
||||
// OnAttestation is called whenever an attestation is received, it updates validators latest vote,
|
||||
// as well as the fork choice store struct.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_attestation(store: Store, attestation: Attestation) -> None:
|
||||
// """
|
||||
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
|
||||
//
|
||||
// An ``attestation`` that is asserted as invalid may be valid at a later time,
|
||||
// consider scheduling it for later processing in such case.
|
||||
// """
|
||||
// target = attestation.data.target
|
||||
//
|
||||
// # Cannot calculate the current shuffling if have not seen the target
|
||||
// assert target.root in store.blocks
|
||||
// # Attestations must be from the current or previous epoch
|
||||
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
|
||||
// # Use GENESIS_EPOCH for previous when genesis to avoid underflow
|
||||
// previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
|
||||
// assert target.epoch in [current_epoch, previous_epoch]
|
||||
// assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
|
||||
//
|
||||
// # Attestations must be for a known block. If target block is unknown, delay consideration until the block is found
|
||||
// assert target.root in store.blocks
|
||||
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
|
||||
// base_state = store.block_states[target.root].copy()
|
||||
// assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT
|
||||
// assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
|
||||
// assert attestation.data.beacon_block_root in store.blocks
|
||||
// # Attestations must not be for blocks in the future. If not, the attestation should not be considered
|
||||
// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
|
||||
//
|
||||
// # Store target checkpoint state if not yet seen
|
||||
// if target not in store.checkpoint_states:
|
||||
// process_slots(base_state, compute_start_slot_of_epoch(target.epoch))
|
||||
// process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
|
||||
// store.checkpoint_states[target] = base_state
|
||||
// target_state = store.checkpoint_states[target]
|
||||
//
|
||||
// # Attestations can only affect the fork choice of subsequent slots.
|
||||
// # Delay consideration in the fork choice until their slot is in the past.
|
||||
// attestation_slot = get_attestation_data_slot(target_state, attestation.data)
|
||||
// assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT
|
||||
// assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Get state at the `target` to validate attestation and calculate the committees
|
||||
// indexed_attestation = get_indexed_attestation(target_state, attestation)
|
||||
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
|
||||
//
|
||||
// # Update latest messages
|
||||
// for i in indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices:
|
||||
// for i in indexed_attestation.attesting_indices:
|
||||
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
|
||||
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
|
||||
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
|
||||
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
tgt := proto.Clone(a.Data.Target).(*ethpb.Checkpoint)
|
||||
tgtSlot := helpers.StartSlot(tgt.Epoch)
|
||||
|
||||
if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
|
||||
return fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
|
||||
}
|
||||
|
||||
// Verify beacon node has seen the target block before.
|
||||
if !s.db.HasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
|
||||
return 0, fmt.Errorf("target root %#x does not exist in db", bytesutil.Trunc(tgt.Root))
|
||||
return ErrTargetRootNotInDB
|
||||
}
|
||||
|
||||
// Verify attestation target has had a valid pre state produced by the target block.
|
||||
baseState, err := s.verifyAttPreState(ctx, tgt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := s.verifyAttTargetEpoch(ctx, baseState.GenesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify Attestations cannot be from future epochs.
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, tgtSlot); err != nil {
|
||||
return 0, errors.Wrap(err, "could not verify attestation target slot")
|
||||
return errors.Wrap(err, "could not verify attestation target slot")
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
// Store target checkpoint state if not yet seen.
|
||||
baseState, err = s.saveCheckpointState(ctx, baseState, tgt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Delay attestation processing until the subsequent slot.
|
||||
if err := s.waitForAttInclDelay(ctx, a, baseState); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := s.verifyAttSlotTime(ctx, baseState, a.Data); err != nil {
|
||||
return 0, err
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, a.Data.Slot+1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.attsQueueLock.Lock()
|
||||
defer s.attsQueueLock.Unlock()
|
||||
atts := make([]*ethpb.Attestation, 0, len(s.attsQueue))
|
||||
for root, a := range s.attsQueue {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
|
||||
"Root": fmt.Sprintf("%#x", root),
|
||||
})
|
||||
log.Debug("Updating latest votes")
|
||||
|
||||
// Use the target state to validate the attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Removing attestation from queue.")
|
||||
delete(s.attsQueue, root)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update every validator's latest vote.
|
||||
if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Mark attestation as seen so we don't update votes when it appears in a block.
|
||||
if err := s.setSeenAtt(a); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
delete(s.attsQueue, root)
|
||||
att, err := s.aggregatedAttestations(ctx, a)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
atts = append(atts, att...)
|
||||
// Use the target state to validate the attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.SaveAttestations(ctx, atts); err != nil {
|
||||
return 0, err
|
||||
// Update every validator's latest vote.
|
||||
if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tgtSlot, nil
|
||||
if err := s.db.SaveAttestation(ctx, a); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"Slot": a.Data.Slot,
|
||||
"Index": a.Data.CommitteeIndex,
|
||||
"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
|
||||
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
})
|
||||
log.Debug("Updated latest votes")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
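For context, a minimal caller sketch (hypothetical, not part of this changeset): because OnAttestation now surfaces the sentinel ErrTargetRootNotInDB instead of a formatted error, a consumer can treat that case as retriable and requeue the attestation until the target block has been synced. processAttestation and retryQueue below are illustrative names only.

// Hypothetical consumer of the new OnAttestation signature.
func processAttestation(ctx context.Context, s *Store, att *ethpb.Attestation, retryQueue chan<- *ethpb.Attestation) error {
	err := s.OnAttestation(ctx, att)
	if err == ErrTargetRootNotInDB {
		// The attestation may become valid once the target block arrives; requeue it.
		retryQueue <- att
		return nil
	}
	return err
}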
||||
// verifyAttPreState validates input attested check point has a valid pre-state.
|
||||
@@ -147,8 +159,41 @@ func (s *Store) verifyAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*pb
|
||||
return baseState, nil
|
||||
}
|
||||
|
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func (s *Store) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
// Prevent previous epoch underflow.
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
}
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d", c.Epoch, currentEpoch, prevEpoch)
}
return nil
}
||||
|
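A quick worked example of the epoch window enforced above, written as a standalone sketch with assumed mainnet-style constants (SecondsPerSlot = 12, SlotsPerEpoch = 32); it is illustrative only and does not use the real params package.

func exampleTargetEpochWindow(genesisTime, nowTime uint64) (currentEpoch, prevEpoch uint64) {
	const secondsPerSlot, slotsPerEpoch uint64 = 12, 32 // assumed values
	currentSlot := (nowTime - genesisTime) / secondsPerSlot
	currentEpoch = currentSlot / slotsPerEpoch
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	// e.g. 800 seconds after genesis -> slot 66 -> epoch 2, so targets from epoch 1 or 2 pass.
	return currentEpoch, prevEpoch
}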
||||
// verifyBeaconBlock verifies beacon head block is known and not from the future.
|
||||
func (s *Store) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
|
||||
b, err := s.db.Block(ctx, bytesutil.ToBytes32(data.BeaconBlockRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b == nil || b.Block == nil {
|
||||
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
|
||||
}
|
||||
if b.Block.Slot > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, %d > %d", b.Block.Slot, data.Slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveCheckpointState saves and returns the processed state with the associated check point.
|
||||
func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconState, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.saveCheckpointState")
|
||||
defer span.End()
|
||||
|
||||
s.checkpointStateLock.Lock()
|
||||
defer s.checkpointStateLock.Unlock()
|
||||
cachedState, err := s.checkpointState.StateByCheckpoint(c)
|
||||
@@ -162,83 +207,57 @@ func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconSta
|
||||
// Advance slots only when it's higher than current state slot.
|
||||
if helpers.StartSlot(c.Epoch) > baseState.Slot {
|
||||
stateCopy := proto.Clone(baseState).(*pb.BeaconState)
|
||||
baseState, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
|
||||
stateCopy, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: baseState,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: stateCopy,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
|
||||
}
|
||||
|
||||
return stateCopy, nil
|
||||
}
|
||||
|
||||
return baseState, nil
|
||||
}
|
||||
|
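The advance-only-forward rule in saveCheckpointState can be summarized with a tiny sketch (needsAdvance is an illustrative helper, not part of the codebase; SlotsPerEpoch = 32 assumed): the cached copy is processed forward only when the checkpoint's start slot is ahead of the base state.

func needsAdvance(checkpointEpoch, stateSlot uint64) bool {
	const slotsPerEpoch = 32 // assumed value, mirroring helpers.StartSlot
	// Epoch 2 against a state at slot 40 -> true (advance the copy to slot 64);
	// against a state already at slot 70 -> false (cache the state as-is).
	return checkpointEpoch*slotsPerEpoch > stateSlot
}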
||||
// waitForAttInclDelay waits until the next slot because attestation can only affect
|
||||
// fork choice of subsequent slot. This is to delay attestation inclusion for fork choice
|
||||
// until the attested slot is in the past.
|
||||
func (s *Store) waitForAttInclDelay(ctx context.Context, a *ethpb.Attestation, targetState *pb.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.forkchoice.waitForAttInclDelay")
|
||||
defer span.End()
|
||||
|
||||
slot, err := helpers.AttestationDataSlot(targetState, a.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attestation slot")
|
||||
}
|
||||
|
||||
nextSlot := slot + 1
|
||||
duration := time.Duration(nextSlot*params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
timeToInclude := time.Unix(int64(targetState.GenesisTime), 0).Add(duration)
|
||||
|
||||
if err := s.aggregateAttestation(ctx, a); err != nil {
|
||||
return errors.Wrap(err, "could not aggregate attestation")
|
||||
}
|
||||
|
||||
time.Sleep(time.Until(timeToInclude))
|
||||
return nil
|
||||
}
|
||||
|
||||
// aggregateAttestation aggregates the attestations in the pending queue.
|
||||
func (s *Store) aggregateAttestation(ctx context.Context, att *ethpb.Attestation) error {
|
||||
s.attsQueueLock.Lock()
|
||||
defer s.attsQueueLock.Unlock()
|
||||
root, err := ssz.HashTreeRoot(att.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if a, ok := s.attsQueue[root]; ok {
|
||||
a, err := helpers.AggregateAttestation(a, att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.attsQueue[root] = a
|
||||
return nil
|
||||
}
|
||||
s.attsQueue[root] = proto.Clone(att).(*ethpb.Attestation)
|
||||
return nil
|
||||
}
|
||||
|
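A self-contained illustration of the bitfield merge that the queue above relies on (assumptions: both attestations carry identical AttestationData, so they hash to the same queue key; BLS signature aggregation, which helpers.AggregateAttestation also performs, is left out here):

b1 := bitfield.NewBitlist(8)
b1.SetBitAt(0, true) // []byte{0x01, 0x01}
b2 := bitfield.NewBitlist(8)
b2.SetBitAt(1, true) // []byte{0x02, 0x01}
merged := make(bitfield.Bitlist, len(b1))
for i := range b1 {
	merged[i] = b1[i] | b2[i] // byte-wise OR; the length bit is preserved
}
// merged == []byte{0x03, 0x01}, matching TestStore_AggregateAttestation later in this diff.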
||||
// verifyAttSlotTime validates input attestation is not from the future.
|
||||
func (s *Store) verifyAttSlotTime(ctx context.Context, baseState *pb.BeaconState, d *ethpb.AttestationData) error {
|
||||
aSlot, err := helpers.AttestationDataSlot(baseState, d)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attestation slot")
|
||||
}
|
||||
return helpers.VerifySlotTime(baseState.GenesisTime, aSlot+1)
|
||||
}
|
||||
|
||||
// verifyAttestation validates input attestation is valid.
|
||||
func (s *Store) verifyAttestation(ctx context.Context, baseState *pb.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, a)
|
||||
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt, err := blocks.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
|
||||
// TODO(3603): Delete the following signature verify fallback when issue 3603 closes.
|
||||
// When signature fails to verify with committee cache enabled at run time,
|
||||
// the following re-runs the same signature verify routine without cache in play.
|
||||
// This provides extra assurance that committee cache can't break run time.
|
||||
if err == blocks.ErrSigFailedToVerify {
|
||||
committee, err = helpers.BeaconCommitteeWithoutCache(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation without cache")
|
||||
}
|
||||
indexedAtt, err = blocks.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation without cache")
|
||||
}
|
||||
sigFailsToVerify.Inc()
|
||||
return indexedAtt, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation")
|
||||
}
|
||||
return indexedAtt, nil
|
||||
@@ -251,36 +270,18 @@ func (s *Store) updateAttVotes(
|
||||
tgtRoot []byte,
|
||||
tgtEpoch uint64) error {
|
||||
|
||||
indices := append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...)
|
||||
newVoteIndices := make([]uint64, 0, len(indices))
|
||||
newVotes := make([]*pb.ValidatorLatestVote, 0, len(indices))
|
||||
indices := indexedAtt.AttestingIndices
|
||||
s.voteLock.Lock()
|
||||
defer s.voteLock.Unlock()
|
||||
for _, i := range indices {
|
||||
vote, err := s.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get latest vote for validator %d", i)
|
||||
}
|
||||
if vote == nil || tgtEpoch > vote.Epoch {
|
||||
newVotes = append(newVotes, &pb.ValidatorLatestVote{
|
||||
vote, ok := s.latestVoteMap[i]
|
||||
if !ok || tgtEpoch > vote.Epoch {
|
||||
s.latestVoteMap[i] = &pb.ValidatorLatestVote{
|
||||
Epoch: tgtEpoch,
|
||||
Root: tgtRoot,
|
||||
})
|
||||
newVoteIndices = append(newVoteIndices, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
return s.db.SaveValidatorLatestVotes(ctx, newVoteIndices, newVotes)
|
||||
}
|
||||
|
||||
// setSeenAtt sets the attestation hash in seen attestation map to true.
|
||||
func (s *Store) setSeenAtt(a *ethpb.Attestation) error {
|
||||
s.seenAttsLock.Lock()
|
||||
defer s.seenAttsLock.Unlock()
|
||||
|
||||
r, err := hashutil.HashProto(a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.seenAtts[r] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
@@ -24,39 +24,38 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
_, err := blockTree1(db)
|
||||
_, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithOutState := ðpb.BeaconBlock{Slot: 0}
|
||||
BlkWithOutState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 0}}
|
||||
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithOutStateRoot, _ := ssz.SigningRoot(BlkWithOutState)
|
||||
BlkWithOutStateRoot, _ := ssz.HashTreeRoot(BlkWithOutState.Block)
|
||||
|
||||
BlkWithStateBadAtt := ðpb.BeaconBlock{Slot: 1}
|
||||
BlkWithStateBadAtt := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithStateBadAttRoot, _ := ssz.SigningRoot(BlkWithStateBadAtt)
|
||||
BlkWithStateBadAttRoot, _ := ssz.HashTreeRoot(BlkWithStateBadAtt.Block)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, BlkWithStateBadAttRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithValidState := ðpb.BeaconBlock{Slot: 2}
|
||||
BlkWithValidState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithValidStateRoot, _ := ssz.SigningRoot(BlkWithValidState)
|
||||
BlkWithValidStateRoot, _ := ssz.HashTreeRoot(BlkWithValidState.Block)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
ActiveIndexRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}, BlkWithValidStateRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -68,12 +67,19 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
wantErr bool
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "data slot is not in the same epoch as target 1 != 0",
|
||||
},
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: []byte{'A'}}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "target root 0x41 does not exist in db",
|
||||
wantErrString: "target root does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
@@ -83,22 +89,25 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
wantErrString: "pre state of target block 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "process attestation from future epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "could not process slot from the future",
|
||||
wantErrString: "does not match current epoch",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
if err := store.GenesisStore(
|
||||
ctx,
|
||||
ðpb.Checkpoint{Root: BlkWithValidStateRoot[:]},
|
||||
ðpb.Checkpoint{Root: BlkWithValidStateRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := store.OnAttestation(ctx, tt.a)
|
||||
err := store.OnAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
@@ -118,32 +127,25 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
crosslinks := make([]*ethpb.Crosslink, params.BeaconConfig().ShardCount)
|
||||
for i := 0; i < len(crosslinks); i++ {
|
||||
crosslinks[i] = ðpb.Crosslink{
|
||||
ParentRoot: make([]byte, 32),
|
||||
DataRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
s := &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
ActiveIndexRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
StateRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{},
|
||||
JustificationBits: []byte{0},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{},
|
||||
CurrentCrosslinks: crosslinks,
|
||||
CompactCommitteesRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
StateRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{},
|
||||
JustificationBits: []byte{0},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
r := [32]byte{'g'}
|
||||
if err := store.db.SaveState(ctx, s, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: r[:]}, ðpb.Checkpoint{Root: r[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -190,7 +192,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
}
|
||||
|
||||
s.Slot = params.BeaconConfig().SlotsPerEpoch + 1
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: r[:]}, ðpb.Checkpoint{Root: r[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
|
||||
@@ -203,51 +205,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_AggregateAttestation(t *testing.T) {
|
||||
_, _, privKeys := testutil.SetupInitialDeposits(t, 100)
|
||||
f := &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
domain := helpers.Domain(f, 0, params.BeaconConfig().DomainAttestation)
|
||||
sig := privKeys[0].Sign([]byte{}, domain)
|
||||
|
||||
store := &Store{attsQueue: make(map[[32]byte]*ethpb.Attestation)}
|
||||
|
||||
b1 := bitfield.NewBitlist(8)
|
||||
b1.SetBitAt(0, true)
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b1, Signature: sig.Marshal()}
|
||||
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, _ := ssz.HashTreeRoot(a.Data)
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, b1) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
|
||||
b2 := bitfield.NewBitlist(8)
|
||||
b2.SetBitAt(1, true)
|
||||
a = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b2, Signature: sig.Marshal()}
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{3, 1}) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
|
||||
b3 := bitfield.NewBitlist(8)
|
||||
b3.SetBitAt(7, true)
|
||||
a = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b3, Signature: sig.Marshal()}
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{131, 1}) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_ReturnAggregatedAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
@@ -270,3 +227,143 @@ func TestStore_ReturnAggregatedAttestation(t *testing.T) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
epoch := uint64(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
baseState.Slot = epoch * params.BeaconConfig().SlotsPerEpoch
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
returned, err := store.saveCheckpointState(ctx, baseState, checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(baseState, returned) {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err := store.checkpointState.StateByCheckpoint(checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cached != nil {
|
||||
t.Error("State shouldn't have been cached")
|
||||
}
|
||||
|
||||
epoch = uint64(2)
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
returned, err = store.saveCheckpointState(ctx, baseState, newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(baseState, returned) {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err = store.checkpointState.StateByCheckpoint(newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(returned, cached) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{Epoch: 1}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
2*params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); !strings.Contains(err.Error(),
|
||||
"target epoch 0 does not match current epoch 2 or prev epoch 1") {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
d := ðpb.AttestationData{}
|
||||
if err := s.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
s.db.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := s.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "could not process attestation for future block") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
s.db.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := s.verifyBeaconBlock(ctx, d); err != nil {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,19 +5,21 @@ import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -48,15 +50,22 @@ import (
|
||||
//
|
||||
// # Update justified checkpoint
|
||||
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
||||
// store.best_justified_checkpoint = state.current_justified_checkpoint
|
||||
//
|
||||
// # Update finalized checkpoint
|
||||
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||
// store.finalized_checkpoint = state.finalized_checkpoint
|
||||
func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
func (s *Store) OnBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
@@ -64,7 +73,7 @@ func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
}
|
||||
preStateValidatorCount := len(preState.Validators)
|
||||
|
||||
root, err := ssz.SigningRoot(b)
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
@@ -72,16 +81,12 @@ func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
"slot": b.Slot,
|
||||
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
|
||||
}).Info("Executing state transition on block")
|
||||
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, b)
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
if err := s.updateBlockAttestationsVotes(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not update votes for attestations in block")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, b); err != nil {
|
||||
if err := s.db.SaveBlock(ctx, signed); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
@@ -89,29 +94,29 @@ func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
|
||||
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
|
||||
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save justified checkpoint")
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
s.clearSeenAtts()
|
||||
helpers.ClearAllCaches()
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
startSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch + 1)
|
||||
endSlot := helpers.StartSlot(postState.FinalizedCheckpoint.Epoch+1) - 1 // Inclusive
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if endSlot > startSlot {
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot)
|
||||
}
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
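A rough numeric sketch of the pruning window above (pruneWindow is a hypothetical helper and SlotsPerEpoch = 32 is assumed): at this point s.finalizedCheckpt still holds the checkpoint finalized before this block, so the deletion range spans the slots between the last two finalized epochs; the checkpoints are rotated only afterwards.

func pruneWindow(prevFinalizedEpoch, finalizedEpoch uint64) (startSlot, endSlot uint64) {
	const slotsPerEpoch = 32 // assumed mainnet-style value
	startSlot = prevFinalizedEpoch * slotsPerEpoch // e.g. epoch 2 -> slot 64
	endSlot = finalizedEpoch * slotsPerEpoch       // e.g. epoch 3 -> slot 96
	// Non-head, non-finalized states roughly in [startSlot, endSlot) become deletion candidates.
	return startSlot, endSlot
}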
||||
@@ -125,30 +130,42 @@ func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if helpers.IsEpochStart(postState.Slot) {
|
||||
if postState.Slot >= s.nextEpochBoundarySlot {
|
||||
logEpochData(postState)
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
// Update committee shuffled indices at the end of every epoch
|
||||
// Update committees cache at epoch boundary slot.
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(postState); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnBlockNoVerifyStateTransition is called when an initial sync block is received.
|
||||
// OnBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without any BLS verification. The BLS verification
|
||||
// includes proposer signature, randao and attestation's aggregated signature.
|
||||
func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
// includes proposer signature, randao and attestation's aggregated signature. It also does not save
|
||||
// attestations.
|
||||
func (s *Store) OnBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
s.initSyncStateLock.Lock()
|
||||
defer s.initSyncStateLock.Unlock()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
preState, err := s.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -156,60 +173,75 @@ func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.Bea
|
||||
|
||||
log.WithField("slot", b.Slot).Debug("Executing state transition on block")
|
||||
|
||||
postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, b)
|
||||
postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, b); err != nil {
|
||||
if err := s.db.SaveBlock(ctx, signed); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
s.initSyncState[root] = postState
|
||||
} else {
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
|
||||
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
|
||||
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save justified checkpoint")
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
s.clearSeenAtts()
|
||||
helpers.ClearAllCaches()
|
||||
startSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch + 1)
|
||||
endSlot := helpers.StartSlot(postState.FinalizedCheckpoint.Epoch+1) - 1 // Inclusive
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if endSlot > startSlot {
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.saveInitState(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save init sync finalized state")
|
||||
}
|
||||
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// Update validator indices in database as needed.
|
||||
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
|
||||
if flags.Get().EnableArchive {
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
}
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if helpers.IsEpochStart(postState.Slot) {
|
||||
if postState.Slot >= s.nextEpochBoundarySlot {
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -219,6 +251,9 @@ func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.Bea
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
// Verify incoming block has a valid pre state.
|
||||
preState, err := s.verifyBlkPreState(ctx, b)
|
||||
if err != nil {
|
||||
@@ -243,57 +278,6 @@ func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// updateBlockAttestationsVotes checks the attestations in block and filter out the seen ones,
|
||||
// the unseen ones get passed to updateBlockAttestationVote for updating fork choice votes.
|
||||
func (s *Store) updateBlockAttestationsVotes(ctx context.Context, atts []*ethpb.Attestation) error {
|
||||
s.seenAttsLock.Lock()
|
||||
defer s.seenAttsLock.Unlock()
|
||||
|
||||
for _, att := range atts {
|
||||
// If we have not seen the attestation yet
|
||||
r, err := hashutil.HashProto(att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.seenAtts[r] {
|
||||
continue
|
||||
}
|
||||
if err := s.updateBlockAttestationVote(ctx, att); err != nil {
|
||||
log.WithError(err).Warn("Attestation failed to update vote")
|
||||
}
|
||||
s.seenAtts[r] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBlockAttestationVotes checks the attestation to update validator's latest votes.
|
||||
func (s *Store) updateBlockAttestationVote(ctx context.Context, att *ethpb.Attestation) error {
|
||||
tgt := att.Data.Target
|
||||
baseState, err := s.db.State(ctx, bytesutil.ToBytes32(tgt.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get state for attestation tgt root")
|
||||
}
|
||||
indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, att)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
for _, i := range append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...) {
|
||||
vote, err := s.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get latest vote for validator %d", i)
|
||||
}
|
||||
if vote == nil || tgt.Epoch > vote.Epoch {
|
||||
if err := s.db.SaveValidatorLatestVote(ctx, i, &pb.ValidatorLatestVote{
|
||||
Epoch: tgt.Epoch,
|
||||
Root: tgt.Root,
|
||||
}); err != nil {
|
||||
return errors.Wrapf(err, "could not save latest vote for validator %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
@@ -309,17 +293,24 @@ func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*p
|
||||
// verifyBlkDescendant validates input block root is a descendant of the
|
||||
// current finalized block root.
|
||||
func (s *Store) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
|
||||
finalizedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlk == nil {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
|
||||
defer span.End()
|
||||
|
||||
finalizedBlkSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
finalizedBlk := finalizedBlkSigned.Block
|
||||
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block root")
|
||||
}
|
||||
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
|
||||
return fmt.Errorf("block from slot %d is not a descendent of the current finalized block", slot)
|
||||
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -370,22 +361,45 @@ func (s *Store) saveNewBlockAttestations(ctx context.Context, atts []*ethpb.Atte
|
||||
return nil
|
||||
}
|
||||
|
// clearSeenAtts clears the seen attestations map. It gets called upon new finalization.
func (s *Store) clearSeenAtts() {
s.seenAttsLock.Lock()
defer s.seenAttsLock.Unlock()
s.seenAtts = make(map[[32]byte]bool)
}
||||
|
||||
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
|
||||
func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
|
||||
defer span.End()
|
||||
|
||||
// Do not remove genesis state or finalized state at epoch boundary.
|
||||
if startSlot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
// Make sure start slot is not a skipped slot
|
||||
for i := startSlot; i > 0; i-- {
|
||||
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
|
||||
b, err := s.db.Blocks(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
startSlot = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure finalized slot is not a skipped slot.
|
||||
for i := endSlot; i > 0; i-- {
|
||||
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
|
||||
b, err := s.db.Blocks(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
endSlot = i - 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Do not remove genesis state
|
||||
if startSlot == 0 {
|
||||
startSlot++
|
||||
}
|
||||
// If end slot comes less than start slot
|
||||
if endSlot < startSlot {
|
||||
endSlot = startSlot
|
||||
}
|
||||
|
||||
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
|
||||
roots, err := s.db.BlockRoots(ctx, filter)
|
||||
@@ -393,9 +407,166 @@ func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot ui
|
||||
return err
|
||||
}
|
||||
|
||||
roots, err = s.filterBlockRoots(ctx, roots)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.DeleteStates(ctx, roots); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldUpdateCurrentJustified prevents a bouncing attack by only updating conflicting justified
|
||||
// checkpoints in the fork choice if in the early slots of the epoch.
|
||||
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
|
||||
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
|
||||
func (s *Store) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
|
||||
if helpers.SlotsSinceEpochStarts(s.currentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
return true, nil
|
||||
}
|
||||
newJustifiedBlockSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(newJustifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
|
||||
return false, errors.New("nil new justified block")
|
||||
}
|
||||
newJustifiedBlock := newJustifiedBlockSigned.Block
|
||||
if newJustifiedBlock.Slot <= helpers.StartSlot(s.justifiedCheckpt.Epoch) {
|
||||
return false, nil
|
||||
}
|
||||
justifiedBlockSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
|
||||
return false, errors.New("nil justified block")
|
||||
}
|
||||
justifiedBlock := justifiedBlockSigned.Block
|
||||
b, err := s.ancestor(ctx, newJustifiedCheckpt.Root, justifiedBlock.Slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
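A simplified walk-through of the guard above (exampleShouldUpdate is illustrative only; SafeSlotsToUpdateJustified = 8 is an assumed value, and the real function additionally compares the new justified block's slot against the start slot of the current justified epoch):

func exampleShouldUpdate(slotsSinceEpochStart uint64, descendsFromCurrentJustified bool) bool {
	const safeSlots = 8 // assumed config value
	if slotsSinceEpochStart < safeSlots {
		return true // early in the epoch: adopt the new justified checkpoint immediately
	}
	// Late in the epoch: only adopt it when it stays on the current justified chain;
	// otherwise it is held in bestJustifiedCheckpt until the next epoch boundary.
	return descendsFromCurrentJustified
}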
||||
func (s *Store) updateJustified(ctx context.Context, state *pb.BeaconState) error {
|
||||
if state.CurrentJustifiedCheckpoint.Epoch > s.bestJustifiedCheckpt.Epoch {
|
||||
s.bestJustifiedCheckpt = state.CurrentJustifiedCheckpoint
|
||||
}
|
||||
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, state.CurrentJustifiedCheckpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if canUpdate {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint
|
||||
}
|
||||
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
justifiedRoot := bytesutil.ToBytes32(state.CurrentJustifiedCheckpoint.Root)
|
||||
justifiedState := s.initSyncState[justifiedRoot]
|
||||
if err := s.db.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save justified state")
|
||||
}
|
||||
}
|
||||
|
||||
return s.db.SaveJustifiedCheckpoint(ctx, state.CurrentJustifiedCheckpoint)
|
||||
}
|
||||
|
// currentSlot returns the current slot based on time.
func (s *Store) currentSlot() uint64 {
return (uint64(time.Now().Unix()) - s.genesisTime) / params.BeaconConfig().SecondsPerSlot
}
||||
|
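And a one-line arithmetic check for currentSlot (slotAt is a stand-in with the clock passed in explicitly; SecondsPerSlot = 12 assumed):

func slotAt(nowUnix, genesisTime uint64) uint64 {
	const secondsPerSlot = 12 // assumed value
	return (nowUnix - genesisTime) / secondsPerSlot // e.g. 125 seconds after genesis -> slot 10
}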
||||
// updateJustifiedCheckpoint updates the justified checkpoint in store if a better checkpoint is known.
|
||||
func (s *Store) updateJustifiedCheckpoint() {
|
||||
// Update at epoch boundary slot only
|
||||
if !helpers.IsEpochStart(s.currentSlot()) {
|
||||
return
|
||||
}
|
||||
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = s.bestJustifiedCheckpt
|
||||
}
|
||||
}
|
||||
|
||||
// This retrieves the cached pre state from memory; it is only used during initial sync.
|
||||
func (s *Store) cachedPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
|
||||
var err error
|
||||
if preState == nil {
|
||||
preState, err = s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
}
|
||||
return proto.Clone(preState).(*pb.BeaconState), nil
|
||||
}
|
||||
|
||||
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// This saves every finalized state in DB during initial sync, needed as part of optimization to
|
||||
// use cache state during initial sync in case of restart.
|
||||
func (s *Store) saveInitState(ctx context.Context, state *pb.BeaconState) error {
|
||||
if !featureconfig.Get().InitSyncCacheState {
|
||||
return nil
|
||||
}
|
||||
finalizedRoot := bytesutil.ToBytes32(state.FinalizedCheckpoint.Root)
|
||||
fs := s.initSyncState[finalizedRoot]
|
||||
|
||||
if err := s.db.SaveState(ctx, fs, finalizedRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
for r, oldState := range s.initSyncState {
|
||||
if oldState.Slot < state.FinalizedCheckpoint.Epoch*params.BeaconConfig().SlotsPerEpoch {
|
||||
delete(s.initSyncState, r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This filters block roots that are not known as head root and finalized root in DB.
|
||||
// It serves as the last line of defence before we prune states.
|
||||
func (s *Store) filterBlockRoots(ctx context.Context, roots [][32]byte) ([][32]byte, error) {
|
||||
f, err := s.db.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fRoot := f.Root
|
||||
h, err := s.db.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hRoot, err := ssz.SigningRoot(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([][32]byte, 0, len(roots))
|
||||
for _, root := range roots {
|
||||
if bytes.Equal(root[:], fRoot[:]) || bytes.Equal(root[:], hRoot[:]) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, root)
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
@@ -1,23 +1,24 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
)
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
@@ -27,23 +28,40 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
genesisStateRoot, err := stateutil.HashTreeRootState(&pb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, validGenesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
roots, err := blockTree1(db, validGenesisRoot[:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
randomParentRoot := []byte{'a'}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot)); err != nil {
|
||||
random := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: validGenesisRoot[:]}}
|
||||
if err := db.SaveBlock(ctx, random); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
randomParentRoot, err := ssz.HashTreeRoot(random.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, randomParentRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
randomParentRoot2 := roots[1]
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
validGenesisRoot := []byte{'g'}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(validGenesisRoot)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -60,13 +78,13 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "block is from the feature",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot, Slot: params.BeaconConfig().FarFutureEpoch},
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:], Slot: params.BeaconConfig().FarFutureEpoch},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "could not process slot from the future",
|
||||
},
|
||||
{
|
||||
name: "could not get finalized block",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot},
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:]},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "block from slot 0 is not a descendent of the current finalized block",
|
||||
},
|
||||
@@ -80,12 +98,12 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: validGenesisRoot[:]}, ðpb.Checkpoint{Root: validGenesisRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.finalizedCheckpt.Root = roots[0]
|
||||
|
||||
err := store.OnBlock(ctx, tt.blk)
|
||||
err := store.OnBlock(ctx, ðpb.SignedBeaconBlock{Block: tt.blk})
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
@@ -119,116 +137,14 @@ func TestStore_SaveNewValidators(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_UpdateBlockAttestationVote(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
r := [32]byte{'A'}
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: r[:]},
|
||||
Crosslink: ðpb.Crosslink{
|
||||
Shard: 0,
|
||||
StartEpoch: 0,
|
||||
},
|
||||
},
|
||||
AggregationBits: []byte{255},
|
||||
CustodyBits: []byte{255},
|
||||
}
|
||||
if err := store.db.SaveState(ctx, beaconState, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
indices, err := blocks.ConvertToIndexed(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var attestedIndices []uint64
|
||||
for _, k := range append(indices.CustodyBit_0Indices, indices.CustodyBit_1Indices...) {
|
||||
attestedIndices = append(attestedIndices, k)
|
||||
}
|
||||
|
||||
if err := store.updateBlockAttestationVote(ctx, att); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, i := range attestedIndices {
|
||||
v, err := store.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(v.Root, r[:]) {
|
||||
t.Error("Attested roots don't match")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_UpdateBlockAttestationsVote(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
r := [32]byte{'A'}
|
||||
atts := make([]*ethpb.Attestation, 5)
|
||||
hashes := make([][32]byte, 5)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: r[:]},
|
||||
Crosslink: ðpb.Crosslink{
|
||||
Shard: uint64(i),
|
||||
StartEpoch: 0,
|
||||
},
|
||||
},
|
||||
AggregationBits: []byte{255},
|
||||
CustodyBits: []byte{255},
|
||||
}
|
||||
h, _ := hashutil.HashProto(atts[i])
|
||||
hashes[i] = h
|
||||
}
|
||||
|
||||
if err := store.db.SaveState(ctx, beaconState, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.updateBlockAttestationsVotes(ctx, atts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, h := range hashes {
|
||||
if !store.seenAtts[h] {
|
||||
t.Error("Seen attestation did not get recorded")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SavesNewBlockAttestations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
a1 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}, CustodyBits: bitfield.NewBitlist(2)}
|
||||
a2 := ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}, CustodyBits: bitfield.NewBitlist(2)}
|
||||
a1 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}}
|
||||
a2 := ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}}
|
||||
r1, _ := ssz.HashTreeRoot(a1.Data)
|
||||
r2, _ := ssz.HashTreeRoot(a2.Data)
|
||||
|
||||
@@ -252,8 +168,8 @@ func TestStore_SavesNewBlockAttestations(t *testing.T) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
|
||||
a1 = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
|
||||
a2 = ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
|
||||
a1 = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}}
|
||||
a2 = ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}}
|
||||
|
||||
if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -287,13 +203,15 @@ func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
|
||||
// Save 100 blocks in DB, each has a state.
|
||||
numBlocks := 100
|
||||
totalBlocks := make([]*ethpb.BeaconBlock, numBlocks)
|
||||
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
|
||||
blockRoots := make([][32]byte, 0)
|
||||
for i := 0; i < len(totalBlocks); i++ {
|
||||
totalBlocks[i] = ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
totalBlocks[i] = ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
r, err := ssz.SigningRoot(totalBlocks[i])
|
||||
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -304,10 +222,14 @@ func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockRoots = append(blockRoots, r)
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 1
|
||||
finalizedEpoch := uint64(1)
|
||||
finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -318,15 +240,16 @@ func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies the genesis state didn't get deleted
|
||||
if s != nil && s.Slot != 0 && s.Slot < endSlot {
|
||||
if s != nil && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 5
|
||||
newFinalizedEpoch := uint64(5)
|
||||
newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1), endSlot); err != nil {
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, r := range blockRoots {
|
||||
@@ -334,12 +257,354 @@ func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies boundary state didnt get deleted
|
||||
if s != nil {
|
||||
isBoundary := s.Slot%params.BeaconConfig().SlotsPerEpoch == 0
|
||||
if !isBoundary && s.Slot < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot)
|
||||
}
|
||||
// Also verifies genesis state didnt get deleted
|
||||
if s != nil && s.Slot != newFinalizedSlot && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
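The test above leans on the epoch-to-slot arithmetic behind helpers.StartSlot: the prune range for a finalized epoch runs up to the last slot of that epoch, inclusive. A tiny sketch of that arithmetic follows (mainnet slots-per-epoch assumed; the tests here run the minimal config with a smaller value):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; the minimal test config uses a smaller number

// startSlot returns the first slot of an epoch, like helpers.StartSlot.
func startSlot(epoch uint64) uint64 { return epoch * slotsPerEpoch }

func main() {
	finalizedEpoch := uint64(1)
	finalizedSlot := startSlot(finalizedEpoch)
	endSlot := startSlot(finalizedEpoch+1) - 1 // last slot of the finalized epoch, inclusive
	fmt.Println(finalizedSlot, endSlot)        // 32 63
}
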
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix())
|
||||
|
||||
update, err := store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
store.genesisTime = uint64(time.Now().Unix()) - diff
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
update, err = store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
store.genesisTime = uint64(time.Now().Unix()) - diff
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
|
||||
update, err := store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if update {
|
||||
t.Error("Should not be able to update justified, received true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedCheckpoint_Update(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix())
|
||||
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
|
||||
store.updateJustifiedCheckpoint()
|
||||
|
||||
if !bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
|
||||
t.Error("Justified check point root did not update")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedCheckpoint_NoUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix()) - params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
|
||||
store.updateJustifiedCheckpoint()
|
||||
|
||||
if bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
|
||||
t.Error("Justified check point root was not suppose to update")
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
// Save 5 blocks in DB, each has a state.
|
||||
numBlocks := 5
|
||||
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
|
||||
blockRoots := make([][32]byte, 0)
|
||||
for i := 0; i < len(totalBlocks); i++ {
|
||||
totalBlocks[i] = ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockRoots = append(blockRoots, r)
|
||||
}
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, blockRoots[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, 10, 11); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Since 5-10 are skip slots, block with slot 4 should be deleted
|
||||
s, err := store.db.State(ctx, blockRoots[4])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s != nil {
|
||||
t.Error("Did not delete state for start slot")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
store.initSyncState[r] = s
|
||||
|
||||
wanted := "pre state of slot 1 does not exist"
|
||||
if _, err := store.cachedPreState(ctx, b); !strings.Contains(err.Error(), wanted) {
|
||||
t.Fatal("Not expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromCacheWithFeature(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
store.initSyncState[r] = s
|
||||
|
||||
received, err := store.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, received) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
|
||||
_, err := store.cachedPreState(ctx, b)
|
||||
wanted := "pre state of slot 1 does not exist"
|
||||
if err.Error() != wanted {
|
||||
t.Error("Did not get wanted error")
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
store.db.SaveState(ctx, s, r)
|
||||
|
||||
received, err := store.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, received) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveInitState_CanSaveDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
for i := uint64(0); i < 64; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: i}
|
||||
s := &pb.BeaconState{Slot: i}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
store.initSyncState[r] = s
|
||||
}
|
||||
|
||||
// Set finalized root as slot 32
|
||||
finalizedRoot, _ := ssz.HashTreeRoot(ðpb.BeaconBlock{Slot: 32})
|
||||
|
||||
if err := store.saveInitState(ctx, &pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 1, Root: finalizedRoot[:]}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify finalized state is saved in DB
|
||||
finalizedState, err := store.db.State(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if finalizedState == nil {
|
||||
t.Error("finalized state can't be nil")
|
||||
}
|
||||
|
||||
// Verify cached state is properly pruned
|
||||
if len(store.initSyncState) != int(params.BeaconConfig().SlotsPerEpoch) {
|
||||
t.Errorf("wanted: %d, got: %d", len(store.initSyncState), params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
signedBlock := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := db.SaveBlock(ctx, signedBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(signedBlock.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.initSyncState[r] = &pb.BeaconState{}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{}, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Could update
|
||||
s := &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: r[:]}}
|
||||
if err := store.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if store.bestJustifiedCheckpt.Epoch != s.CurrentJustifiedCheckpoint.Epoch {
|
||||
t.Error("Incorrect justified epoch in store")
|
||||
}
|
||||
|
||||
// Could not update
|
||||
store.bestJustifiedCheckpt.Epoch = 2
|
||||
if err := store.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if store.bestJustifiedCheckpt.Epoch != 2 {
|
||||
t.Error("Incorrect justified epoch in store")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterBlockRoots_CanFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
fBlock := ðpb.BeaconBlock{}
|
||||
fRoot, _ := ssz.HashTreeRoot(fBlock)
|
||||
hBlock := ðpb.BeaconBlock{Slot: 1}
|
||||
headRoot, _ := ssz.HashTreeRoot(hBlock)
|
||||
if err := store.db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: fBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, fRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: fRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: hBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
roots := [][32]byte{{'C'}, {'D'}, headRoot, {'E'}, fRoot, {'F'}}
|
||||
wanted := [][32]byte{{'C'}, {'D'}, {'E'}, {'F'}}
|
||||
|
||||
received, err := store.filterBlockRoots(ctx, roots)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(wanted, received) {
|
||||
t.Error("Did not filter correctly")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,17 +3,23 @@ package forkchoice
import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/stateutil"
	"go.opencensus.io/trace"
)

@@ -21,9 +27,9 @@ import (
// to beacon blocks to compute head.
type ForkChoicer interface {
	Head(ctx context.Context) ([]byte, error)
	OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error
	OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error
	OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error)
	OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) error
	OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) error
	OnAttestation(ctx context.Context, a *ethpb.Attestation) error
	GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error
	FinalizedCheckpt() *ethpb.Checkpoint
}
@@ -31,17 +37,21 @@ type ForkChoicer interface {
|
||||
// Store represents a service struct that handles the forkchoice
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Store struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
db db.Database
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
attsQueue map[[32]byte]*ethpb.Attestation
|
||||
attsQueueLock sync.Mutex
|
||||
seenAtts map[[32]byte]bool
|
||||
seenAttsLock sync.Mutex
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
db db.Database
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
genesisTime uint64
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
latestVoteMap map[uint64]*pb.ValidatorLatestVote
|
||||
voteLock sync.RWMutex
|
||||
initSyncState map[[32]byte]*pb.BeaconState
|
||||
initSyncStateLock sync.RWMutex
|
||||
nextEpochBoundarySlot uint64
|
||||
}
|
||||
|
||||
// NewForkChoiceService instantiates a new service instance that will
|
||||
@@ -53,8 +63,8 @@ func NewForkChoiceService(ctx context.Context, db db.Database) *Store {
|
||||
cancel: cancel,
|
||||
db: db,
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
attsQueue: make(map[[32]byte]*ethpb.Attestation),
|
||||
seenAtts: make(map[[32]byte]bool),
|
||||
latestVoteMap: make(map[uint64]*pb.ValidatorLatestVote),
|
||||
initSyncState: make(map[[32]byte]*pb.BeaconState),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,7 +91,9 @@ func (s *Store) GenesisStore(
|
||||
finalizedCheckpoint *ethpb.Checkpoint) error {
|
||||
|
||||
s.justifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.bestJustifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.finalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.prevFinalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
|
||||
|
||||
justifiedState, err := s.db.State(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
@@ -95,6 +107,35 @@ func (s *Store) GenesisStore(
|
||||
return errors.Wrap(err, "could not save genesis state in check point cache")
|
||||
}
|
||||
|
||||
s.genesisTime = justifiedState.GenesisTime
|
||||
if err := s.cacheGenesisState(ctx); err != nil {
|
||||
return errors.Wrap(err, "could not cache initial sync state")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets up genesis for the initial sync state cache.
|
||||
func (s *Store) cacheGenesisState(ctx context.Context) error {
|
||||
if !featureconfig.Get().InitSyncCacheState {
|
||||
return nil
|
||||
}
|
||||
|
||||
genesisState, err := s.db.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stateRoot, err := stateutil.HashTreeRootState(genesisState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash genesis state")
|
||||
}
|
||||
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
s.initSyncState[genesisBlkRoot] = genesisState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -113,10 +154,19 @@ func (s *Store) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte,
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
|
||||
defer span.End()
|
||||
|
||||
b, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
// Stop recursive ancestry lookup if context is cancelled.
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
signed, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get ancestor block")
|
||||
}
|
||||
if signed == nil || signed.Block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
b := signed.Block
|
||||
|
||||
// If we don't have the ancestor in the DB, simply return nil so the rest of fork choice
|
||||
// operation can proceed. This is not an error condition.
|
||||
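The hunk above makes the recursive ancestor lookup stop as soon as its context is cancelled and walks SignedBeaconBlock wrappers instead of bare blocks. A self-contained sketch of the same recursion over a toy block map (hypothetical types, not the Prysm DB) looks like this:

package main

import (
	"context"
	"errors"
	"fmt"
)

type block struct {
	parent string
	slot   uint64
}

// ancestor walks parent links until it reaches the requested slot, bailing out
// early when the context is cancelled, as the patched forkchoice ancestor does.
func ancestor(ctx context.Context, blocks map[string]block, root string, slot uint64) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err
	}
	b, ok := blocks[root]
	if !ok {
		return "", errors.New("nil block")
	}
	if b.slot == slot {
		return root, nil
	}
	if b.slot < slot {
		return "", nil // requested slot is newer than this branch; nothing to return
	}
	return ancestor(ctx, blocks, b.parent, slot)
}

func main() {
	blocks := map[string]block{
		"c": {parent: "b", slot: 3},
		"b": {parent: "a", slot: 2},
		"a": {parent: "", slot: 1},
	}
	root, err := ancestor(context.Background(), blocks, "c", 1)
	fmt.Println(root, err) // a <nil>
}
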
@@ -160,18 +210,21 @@ func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64
|
||||
return 0, errors.Wrap(err, "could not get active indices for last justified checkpoint")
|
||||
}
|
||||
|
||||
wantedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
wantedBlkSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get target block")
|
||||
}
|
||||
if wantedBlkSigned == nil || wantedBlkSigned.Block == nil {
|
||||
return 0, errors.New("nil wanted block")
|
||||
}
|
||||
wantedBlk := wantedBlkSigned.Block
|
||||
|
||||
balances := uint64(0)
|
||||
s.voteLock.RLock()
|
||||
defer s.voteLock.RUnlock()
|
||||
for _, i := range activeIndices {
|
||||
vote, err := s.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "could not get validator %d's latest vote", i)
|
||||
}
|
||||
if vote == nil {
|
||||
vote, ok := s.latestVoteMap[i]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -189,14 +242,16 @@ func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64
|
||||
// Head returns the head of the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_head(store: Store) -> Hash:
|
||||
// def get_head(store: Store) -> Root:
|
||||
// # Get filtered block tree that only includes viable branches
|
||||
// blocks = get_filtered_block_tree(store)
|
||||
// # Execute the LMD-GHOST fork choice
|
||||
// head = store.justified_checkpoint.root
|
||||
// justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch)
|
||||
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
|
||||
// while True:
|
||||
// children = [
|
||||
// root for root in store.blocks.keys()
|
||||
// if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot
|
||||
// root for root in blocks.keys()
|
||||
// if blocks[root].parent_root == head and blocks[root].slot > justified_slot
|
||||
// ]
|
||||
// if len(children) == 0:
|
||||
// return head
|
||||
@@ -207,13 +262,18 @@ func (s *Store) Head(ctx context.Context) ([]byte, error) {
|
||||
defer span.End()
|
||||
|
||||
head := s.JustifiedCheckpt().Root
|
||||
filteredBlocks, err := s.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
justifiedSlot := helpers.StartSlot(s.justifiedCheckpt.Epoch)
|
||||
for {
|
||||
startSlot := s.JustifiedCheckpt().Epoch * params.BeaconConfig().SlotsPerEpoch
|
||||
filter := filters.NewFilter().SetParentRoot(head).SetStartSlot(startSlot)
|
||||
children, err := s.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve children info")
|
||||
children := make([][32]byte, 0, len(filteredBlocks))
|
||||
for root, block := range filteredBlocks {
|
||||
if bytes.Equal(block.ParentRoot, head) && block.Slot > justifiedSlot {
|
||||
children = append(children, root)
|
||||
}
|
||||
}
|
||||
|
||||
if len(children) == 0 {
|
||||
@@ -244,6 +304,124 @@ func (s *Store) Head(ctx context.Context) ([]byte, error) {
|
||||
}
|
||||
}
|
||||
|
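The get_head pseudocode quoted above descends from the justified root, repeatedly picking the child with the greatest attesting balance until no child remains. A simplified, self-contained sketch of that loop follows; tie-breaking by root is omitted, and the block and weight maps are illustrative stand-ins for the store and latestAttestingBalance.

package main

import "fmt"

type blk struct {
	parent string
	slot   uint64
}

// head repeatedly descends to the heaviest child until a leaf is reached,
// mirroring the get_head loop quoted above.
func head(blocks map[string]blk, weight map[string]uint64, justifiedRoot string, justifiedSlot uint64) string {
	cur := justifiedRoot
	for {
		best, bestW, found := "", uint64(0), false
		for name, b := range blocks {
			if b.parent != cur || b.slot <= justifiedSlot {
				continue
			}
			if !found || weight[name] > bestW {
				best, bestW, found = name, weight[name], true
			}
		}
		if !found {
			return cur
		}
		cur = best
	}
}

func main() {
	blocks := map[string]blk{
		"b1": {parent: "g", slot: 1},
		"b2": {parent: "g", slot: 2},
		"b3": {parent: "b2", slot: 3},
	}
	weight := map[string]uint64{"b1": 10, "b2": 20, "b3": 5}
	fmt.Println(head(blocks, weight, "g", 0)) // b3
}
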
||||
// getFilterBlockTree retrieves a filtered block tree from store, it only returns branches
|
||||
// whose leaf state's justified and finalized info agrees with what's in the store.
|
||||
// Rationale: https://notes.ethereum.org/Fj-gVkOSTpOyUx-zkWjuwg?view
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]:
|
||||
// """
|
||||
// Retrieve a filtered block tree from ``store``, only returning branches
|
||||
// whose leaf state's justified/finalized info agrees with that in ``store``.
|
||||
// """
|
||||
// base = store.justified_checkpoint.root
|
||||
// blocks: Dict[Root, BeaconBlock] = {}
|
||||
// filter_block_tree(store, base, blocks)
|
||||
// return blocks
|
||||
func (s *Store) getFilterBlockTree(ctx context.Context) (map[[32]byte]*ethpb.BeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.getFilterBlockTree")
|
||||
defer span.End()
|
||||
|
||||
baseRoot := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
|
||||
filteredBlocks := make(map[[32]byte]*ethpb.BeaconBlock)
|
||||
if _, err := s.filterBlockTree(ctx, baseRoot, filteredBlocks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filteredBlocks, nil
|
||||
}
|
||||
|
||||
// filterBlockTree filters for branches that see latest finalized and justified info as correct on-chain
|
||||
// before running Head.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
|
||||
// block = store.blocks[block_root]
|
||||
// children = [
|
||||
// root for root in store.blocks.keys()
|
||||
// if store.blocks[root].parent_root == block_root
|
||||
// ]
|
||||
// # If any children branches contain expected finalized/justified checkpoints,
|
||||
// # add to filtered block-tree and signal viability to parent.
|
||||
// if any(children):
|
||||
// filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children]
|
||||
// if any(filter_block_tree_result):
|
||||
// blocks[block_root] = block
|
||||
// return True
|
||||
// return False
|
||||
// # If leaf block, check finalized/justified checkpoints as matching latest.
|
||||
// head_state = store.block_states[block_root]
|
||||
// correct_justified = (
|
||||
// store.justified_checkpoint.epoch == GENESIS_EPOCH
|
||||
// or head_state.current_justified_checkpoint == store.justified_checkpoint
|
||||
// )
|
||||
// correct_finalized = (
|
||||
// store.finalized_checkpoint.epoch == GENESIS_EPOCH
|
||||
// or head_state.finalized_checkpoint == store.finalized_checkpoint
|
||||
// )
|
||||
// # If expected finalized/justified, add to viable block-tree and signal viability to parent.
|
||||
// if correct_justified and correct_finalized:
|
||||
// blocks[block_root] = block
|
||||
// return True
|
||||
// # Otherwise, branch not viable
|
||||
// return False
|
||||
func (s *Store) filterBlockTree(ctx context.Context, blockRoot [32]byte, filteredBlocks map[[32]byte]*ethpb.BeaconBlock) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.filterBlockTree")
|
||||
defer span.End()
|
||||
signed, err := s.db.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if signed == nil || signed.Block == nil {
|
||||
return false, errors.New("nil block")
|
||||
}
|
||||
block := signed.Block
|
||||
|
||||
filter := filters.NewFilter().SetParentRoot(blockRoot[:])
|
||||
childrenRoots, err := s.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(childrenRoots) != 0 {
|
||||
var filtered bool
|
||||
for _, childRoot := range childrenRoots {
|
||||
didFilter, err := s.filterBlockTree(ctx, childRoot, filteredBlocks)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if didFilter {
|
||||
filtered = true
|
||||
}
|
||||
}
|
||||
if filtered {
|
||||
filteredBlocks[blockRoot] = block
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
headState, err := s.db.State(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if headState == nil {
|
||||
return false, fmt.Errorf("no state matching block root %v", hex.EncodeToString(blockRoot[:]))
|
||||
}
|
||||
|
||||
correctJustified := s.justifiedCheckpt.Epoch == 0 ||
|
||||
proto.Equal(s.justifiedCheckpt, headState.CurrentJustifiedCheckpoint)
|
||||
correctFinalized := s.finalizedCheckpt.Epoch == 0 ||
|
||||
proto.Equal(s.finalizedCheckpt, headState.FinalizedCheckpoint)
|
||||
if correctJustified && correctFinalized {
|
||||
filteredBlocks[blockRoot] = block
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
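filterBlockTree keeps a block only when it is a viable leaf or has at least one viable descendant. The recursion is easier to see on a toy in-memory tree; the node type and its viable flag below are stand-ins for the leaf-state checkpoint checks, not Prysm types.

package main

import "fmt"

// node is a minimal stand-in for a beacon block in a toy tree.
type node struct {
	parent string
	viable bool // whether the leaf's justified/finalized info matches the store
}

// filterTree walks the children of root and keeps a block only if it is a
// viable leaf or has at least one viable descendant, echoing filter_block_tree.
func filterTree(blocks map[string]node, root string, out map[string]node) bool {
	children := []string{}
	for name, n := range blocks {
		if n.parent == root {
			children = append(children, name)
		}
	}
	if len(children) == 0 {
		if blocks[root].viable {
			out[root] = blocks[root]
			return true
		}
		return false
	}
	kept := false
	for _, c := range children {
		if filterTree(blocks, c, out) {
			kept = true
		}
	}
	if kept {
		out[root] = blocks[root]
	}
	return kept
}

func main() {
	blocks := map[string]node{
		"g": {parent: ""},
		"a": {parent: "g"},
		"b": {parent: "a", viable: true},
		"c": {parent: "g", viable: false},
	}
	out := map[string]node{}
	filterTree(blocks, "g", out)
	fmt.Println(len(out)) // 3: g, a and b survive; c is pruned
}
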
||||
// JustifiedCheckpt returns the latest justified check point from fork choice store.
|
||||
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return proto.Clone(s.justifiedCheckpt).(*ethpb.Checkpoint)
|
||||
|
||||
@@ -7,15 +7,16 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
)
|
||||
|
||||
func TestStore_GenesisStoreOk(t *testing.T) {
|
||||
@@ -27,18 +28,21 @@ func TestStore_GenesisStoreOk(t *testing.T) {
|
||||
|
||||
genesisTime := time.Unix(9999, 0)
|
||||
genesisState := &pb.BeaconState{GenesisTime: uint64(genesisTime.Unix())}
|
||||
genesisStateRoot, err := ssz.HashTreeRoot(genesisState)
|
||||
genesisStateRoot, err := stateutil.HashTreeRootState(genesisState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: genesisBlkRoot[:]}
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
@@ -68,7 +72,7 @@ func TestStore_AncestorOk(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -108,7 +112,7 @@ func TestStore_AncestorNotPartOfTheChain(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -139,7 +143,7 @@ func TestStore_LatestAttestingBalance(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -150,18 +154,21 @@ func TestStore_LatestAttestingBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
stateRoot, err := ssz.HashTreeRoot(s)
|
||||
stateRoot, err := stateutil.HashTreeRootState(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.SigningRoot(b)
|
||||
blkRoot, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
@@ -174,17 +181,11 @@ func TestStore_LatestAttestingBalance(t *testing.T) {
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 33:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
|
||||
case i > 66:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -211,15 +212,13 @@ func TestStore_LatestAttestingBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore_ChildrenBlocksFromParentRoot(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -250,7 +249,7 @@ func TestStore_GetHead(t *testing.T) {
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -261,15 +260,21 @@ func TestStore_GetHead(t *testing.T) {
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
stateRoot, err := ssz.HashTreeRoot(s)
|
||||
stateRoot, err := stateutil.HashTreeRootState(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.SigningRoot(b)
|
||||
blkRoot, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
|
||||
@@ -293,17 +298,11 @@ func TestStore_GetHead(t *testing.T) {
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 33:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
|
||||
case i > 66:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -317,9 +316,8 @@ func TestStore_GetHead(t *testing.T) {
|
||||
}
|
||||
|
||||
// 1 validator switches vote to B7 to gain 34%, enough to switch head
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, 50, &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(50)] = &pb.ValidatorLatestVote{Root: roots[7]}
|
||||
|
||||
head, err = store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -331,9 +329,7 @@ func TestStore_GetHead(t *testing.T) {
|
||||
// 18 validators switches vote to B1 to gain 51%, enough to switch head
|
||||
for i := 0; i < 18; i++ {
|
||||
idx := 50 + uint64(i)
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, idx, &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.latestVoteMap[uint64(idx)] = &pb.ValidatorLatestVote{Root: roots[1]}
|
||||
}
|
||||
head, err = store.Head(ctx)
|
||||
if err != nil {
|
||||
@@ -344,3 +340,178 @@ func TestStore_GetHead(t *testing.T) {
|
||||
t.Error("Incorrect head")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheGenesisState_Correct(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
b := ðpb.BeaconBlock{Slot: 1}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
s := &pb.BeaconState{GenesisTime: 99}
|
||||
|
||||
store.db.SaveState(ctx, s, r)
|
||||
store.db.SaveGenesisBlockRoot(ctx, r)
|
||||
|
||||
if err := store.cacheGenesisState(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, state := range store.initSyncState {
|
||||
if !reflect.DeepEqual(s, state) {
|
||||
t.Error("Did not get wanted state")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetFilterBlockTree_CorrectLeaf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{}
|
||||
stateRoot, err := stateutil.HashTreeRootState(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tree, err := store.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := make(map[[32]byte]*ethpb.BeaconBlock)
|
||||
for _, root := range roots {
|
||||
root32 := bytesutil.ToBytes32(root)
|
||||
b, _ := store.db.Block(ctx, root32)
|
||||
if b != nil {
|
||||
wanted[root32] = b.Block
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(tree, wanted) {
|
||||
t.Error("Did not filter tree correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetFilterBlockTree_IncorrectLeaf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{}
|
||||
stateRoot, err := stateutil.HashTreeRootState(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Filter for incorrect leaves for 1, 7 and 8
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[1]))
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[7]))
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[8]))
|
||||
store.justifiedCheckpt.Epoch = 1
|
||||
tree, err := store.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(tree) != 0 {
|
||||
t.Error("filtered tree should be 0 length")
|
||||
}
|
||||
|
||||
// Set leaf 1 as correct
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: store.justifiedCheckpt.Root}}, bytesutil.ToBytes32(roots[1]))
|
||||
tree, err = store.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := make(map[[32]byte]*ethpb.BeaconBlock)
|
||||
root32 := bytesutil.ToBytes32(roots[0])
|
||||
b, err = store.db.Block(ctx, root32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted[root32] = b.Block
|
||||
root32 = bytesutil.ToBytes32(roots[1])
|
||||
b, err = store.db.Block(ctx, root32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted[root32] = b.Block
|
||||
|
||||
if !reflect.DeepEqual(tree, wanted) {
|
||||
t.Error("Did not filter tree correctly")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,10 @@ package forkchoice
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
@@ -15,31 +15,40 @@ import (
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
// (B1, and B3 are all from the same slots)
|
||||
func blockTree1(db db.Database) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: genesisRoot}
|
||||
r0, _ := ssz.HashTreeRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.SigningRoot(b1)
|
||||
r1, _ := ssz.HashTreeRoot(b1)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
|
||||
r3, _ := ssz.SigningRoot(b3)
|
||||
r3, _ := ssz.HashTreeRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
|
||||
r4, _ := ssz.SigningRoot(b4)
|
||||
r4, _ := ssz.HashTreeRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
|
||||
r5, _ := ssz.SigningRoot(b5)
|
||||
r5, _ := ssz.HashTreeRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
|
||||
r6, _ := ssz.SigningRoot(b6)
|
||||
r6, _ := ssz.HashTreeRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
|
||||
r7, _ := ssz.SigningRoot(b7)
|
||||
r7, _ := ssz.HashTreeRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
|
||||
r8, _ := ssz.SigningRoot(b8)
|
||||
r8, _ := ssz.HashTreeRoot(b8)
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, r1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, r7); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, r8); err != nil {
return nil, err
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}

@@ -72,39 +81,39 @@ func blockTree1(db db.Database) ([][]byte, error) {
//}
func blockTree2(db db.Database) ([][]byte, error) {
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
r0, _ := ssz.SigningRoot(b0)
r0, _ := ssz.HashTreeRoot(b0)
b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
r1, _ := ssz.SigningRoot(b1)
r1, _ := ssz.HashTreeRoot(b1)
b2 := &ethpb.BeaconBlock{Slot: 2, ParentRoot: r0[:]}
r2, _ := ssz.SigningRoot(b2)
r2, _ := ssz.HashTreeRoot(b2)
b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r1[:]}
r3, _ := ssz.SigningRoot(b3)
r3, _ := ssz.HashTreeRoot(b3)
b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r1[:]}
r4, _ := ssz.SigningRoot(b4)
r4, _ := ssz.HashTreeRoot(b4)
b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r2[:]}
r5, _ := ssz.SigningRoot(b5)
r5, _ := ssz.HashTreeRoot(b5)
b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r2[:]}
r6, _ := ssz.SigningRoot(b6)
r6, _ := ssz.HashTreeRoot(b6)
b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r3[:]}
r7, _ := ssz.SigningRoot(b7)
r7, _ := ssz.HashTreeRoot(b7)
b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r3[:]}
r8, _ := ssz.SigningRoot(b8)
r8, _ := ssz.HashTreeRoot(b8)
b9 := &ethpb.BeaconBlock{Slot: 9, ParentRoot: r3[:]}
r9, _ := ssz.SigningRoot(b9)
r9, _ := ssz.HashTreeRoot(b9)
b10 := &ethpb.BeaconBlock{Slot: 10, ParentRoot: r3[:]}
r10, _ := ssz.SigningRoot(b10)
r10, _ := ssz.HashTreeRoot(b10)
b11 := &ethpb.BeaconBlock{Slot: 11, ParentRoot: r4[:]}
r11, _ := ssz.SigningRoot(b11)
r11, _ := ssz.HashTreeRoot(b11)
b12 := &ethpb.BeaconBlock{Slot: 12, ParentRoot: r6[:]}
r12, _ := ssz.SigningRoot(b12)
r12, _ := ssz.HashTreeRoot(b12)
b13 := &ethpb.BeaconBlock{Slot: 13, ParentRoot: r6[:]}
r13, _ := ssz.SigningRoot(b13)
r13, _ := ssz.HashTreeRoot(b13)
b14 := &ethpb.BeaconBlock{Slot: 14, ParentRoot: r7[:]}
r14, _ := ssz.SigningRoot(b14)
r14, _ := ssz.HashTreeRoot(b14)
b15 := &ethpb.BeaconBlock{Slot: 15, ParentRoot: r7[:]}
r15, _ := ssz.SigningRoot(b15)
r15, _ := ssz.HashTreeRoot(b15)
for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} {
if err := db.SaveBlock(context.Background(), b); err != nil {
if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: b}); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
@@ -121,19 +130,19 @@ func blockTree3(db db.Database) ([][]byte, error) {
roots := make([][]byte, 0, blkCount)
blks := make([]*ethpb.BeaconBlock, 0, blkCount)
b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
r0, _ := ssz.SigningRoot(b0)
r0, _ := ssz.HashTreeRoot(b0)
roots = append(roots, r0[:])
blks = append(blks, b0)

for i := 1; i < blkCount; i++ {
b := &ethpb.BeaconBlock{Slot: uint64(i), ParentRoot: roots[len(roots)-1]}
r, _ := ssz.SigningRoot(b)
r, _ := ssz.HashTreeRoot(b)
roots = append(roots, r[:])
blks = append(blks, b)
}

for _, b := range blks {
if err := db.SaveBlock(context.Background(), b); err != nil {
if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: b}); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {

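Editorial note, not part of the diff: the test helpers above switch from ssz.SigningRoot on a bare BeaconBlock to ssz.HashTreeRoot on the inner block, and wrap blocks in SignedBeaconBlock before persisting them. A minimal sketch of the new pattern, assuming the same ethpb, ssz, and db packages used in the diff (parentRoot, ctx, and db are assumed to exist in the caller):

// Hedged sketch of the post-change save pattern.
blk := &ethpb.BeaconBlock{Slot: 1, ParentRoot: parentRoot[:]}
root, err := ssz.HashTreeRoot(blk) // root of the inner block, not the signed wrapper
if err != nil {
	return nil, err
}
signed := &ethpb.SignedBeaconBlock{Block: blk}
if err := db.SaveBlock(ctx, signed); err != nil {
	return nil, err
}
_ = root // used by the caller, e.g. as the next block's ParentRoot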
@@ -1,7 +1,7 @@
package blockchain

import (
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/sirupsen/logrus"
)

@@ -47,10 +47,22 @@ var (
Name: "processed_attestation_counter",
Help: "The # of processed attestations with pubsub and fork choice, this usually means attestations from rpc",
})
headFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "head_finalized_epoch",
Help: "Last finalized epoch of the head state",
})
headFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
Name: "head_finalized_root",
Help: "Last finalized root of the head state",
})
)

func (s *Service) reportSlotMetrics(currentSlot uint64) {
beaconSlot.Set(float64(currentSlot))
beaconHeadSlot.Set(float64(s.HeadSlot()))
beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(s.HeadRoot())))
if s.headState != nil {
headFinalizedEpoch.Set(float64(s.headState.FinalizedCheckpoint.Epoch))
headFinalizedRoot.Set(float64(bytesutil.ToLowInt64(s.headState.FinalizedCheckpoint.Root)))
}
}

@@ -3,57 +3,23 @@ package blockchain
import (
"bytes"
"context"
"encoding/hex"
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
}

// ReceiveAttestation is a function that defines the operations that are performed on
// attestation that is received from regular sync. The operations consist of:
// 1. Gossip attestation to other peers
// 2. Validate attestation, update validator's latest vote
// 3. Apply fork choice to the processed attestation
// 4. Save latest head info
func (s *Service) ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestation")
defer span.End()

// Broadcast the new attestation to the network.
if err := s.p2p.Broadcast(ctx, att); err != nil {
return errors.Wrap(err, "could not broadcast attestation")
}

attDataRoot, err := ssz.HashTreeRoot(att.Data)
if err != nil {
log.WithError(err).Error("Failed to hash attestation")
}

log.WithFields(logrus.Fields{
"attRoot": fmt.Sprintf("%#x", attDataRoot),
"blockRoot": fmt.Sprintf("%#x", att.Data.BeaconBlockRoot),
}).Debug("Broadcasting attestation")

if err := s.ReceiveAttestationNoPubsub(ctx, att); err != nil {
return err
}

processedAtt.Inc()
return nil
}

// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
// attestation that is received from regular sync. The operations consist of:
// 1. Validate attestation, update validator's latest vote
@@ -64,8 +30,7 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
defer span.End()

// Update forkchoice store for the new attestation
attSlot, err := s.forkChoiceStore.OnAttestation(ctx, att)
if err != nil {
if err := s.forkChoiceStore.OnAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation from fork choice service")
}

@@ -76,37 +41,49 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
}
// Only save head if it's different than the current head.
if !bytes.Equal(headRoot, s.HeadRoot()) {
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
signed, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return errors.Wrap(err, "could not compute state from block head")
}
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
if signed == nil || signed.Block == nil {
return errors.New("nil head block")
}
if err := s.saveHead(ctx, signed, bytesutil.ToBytes32(headRoot)); err != nil {
return errors.Wrap(err, "could not save head")
}
}

// Skip checking for competing attestation's target roots at epoch boundary.
if !helpers.IsEpochStart(attSlot) {
s.headLock.RLock()
defer s.headLock.RUnlock()
targetRoot, err := helpers.BlockRoot(s.headState, att.Data.Target.Epoch)
if err != nil {
return errors.Wrapf(err, "could not get target root for epoch %d", att.Data.Target.Epoch)
}
isCompetingAtts(targetRoot, att.Data.Target.Root[:])
}

processedAttNoPubsub.Inc()
return nil
}

// This checks if the attestation is from a competing chain, emits warning and updates metrics.
func isCompetingAtts(headTargetRoot []byte, attTargetRoot []byte) {
if !bytes.Equal(attTargetRoot, headTargetRoot) {
log.WithFields(logrus.Fields{
"attTargetRoot": hex.EncodeToString(attTargetRoot),
"headTargetRoot": hex.EncodeToString(headTargetRoot),
}).Warn("target heads different from new attestation")
competingAtts.Inc()
// This processes attestations from the attestation pool to account for validator votes and fork choice.
func (s *Service) processAttestation() {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
<-stateChannel
stateSub.Unsubscribe()

st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-st.C():
ctx := context.Background()
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"targetRoot": fmt.Sprintf("%#x", a.Data.Target.Root),
}).WithError(err).Error("Could not receive attestation in chain service")
}
}
}
}
}

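Editorial note, not part of the diff: the new processAttestation loop above waits for the state feed, then drains fork-choice attestations from the pool once per slot. A hedged sketch of the producer side, assuming the pool exposes a SaveForkchoiceAttestation counterpart to the ForkchoiceAttestations/DeleteForkchoiceAttestation calls shown above (att is assumed to be an already-validated *ethpb.Attestation):

// Hypothetical producer: a sync handler parks a verified attestation in the pool;
// the processAttestation slot ticker above will pick it up and run fork choice on it.
if err := s.attPool.SaveForkchoiceAttestation(att); err != nil {
	log.WithError(err).Error("Could not save fork choice attestation")
}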
@@ -3,84 +3,15 @@ package blockchain
import (
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
"golang.org/x/net/context"
)

func TestReceiveAttestation_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()

chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}

b := &ethpb.BeaconBlock{}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
t.Fatal(err)
}

a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
Crosslink: &ethpb.Crosslink{},
}}
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
t.Fatal(err)
}

testutil.AssertLogsContain(t, hook, "Saved new head info")
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}

func TestReceiveAttestation_SameHead(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
ctx := context.Background()

chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}
chainService.canonicalRoots[0] = r[:]

b := &ethpb.BeaconBlock{}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
if err != nil {
t.Fatal(err)
}
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
t.Fatal(err)
}

a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
Crosslink: &ethpb.Crosslink{},
}}
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
t.Fatal(err)
}

testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}

func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
hook := logTest.NewGlobal()
db := testDB.SetupDB(t)
@@ -88,14 +19,14 @@ func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
ctx := context.Background()

chainService := setupBeaconChain(t, db)
r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
r, _ := ssz.HashTreeRoot(&ethpb.BeaconBlock{})
chainService.forkChoiceStore = &store{headRoot: r[:]}

b := &ethpb.BeaconBlock{}
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
t.Fatal(err)
}
root, err := ssz.SigningRoot(b)
root, err := ssz.HashTreeRoot(b.Block)
if err != nil {
t.Fatal(err)
}
@@ -104,8 +35,7 @@ func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
}

a := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: root[:]},
Crosslink: &ethpb.Crosslink{},
Target: &ethpb.Checkpoint{Root: root[:]},
}}
if err := chainService.ReceiveAttestationNoPubsub(ctx, a); err != nil {
t.Fatal(err)

@@ -5,10 +5,16 @@ import (
"context"
"encoding/hex"

"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -16,10 +22,10 @@ import (

// BlockReceiver interface defines the methods of the chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error
}

// ReceiveBlock is a function that defines the operations that are performed on
@@ -28,11 +34,11 @@ type BlockReceiver interface {
// 2. Validate block, apply state transition and update check points
// 3. Apply fork choice to the processed block
// 4. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
defer span.End()

root, err := ssz.SigningRoot(block)
root, err := ssz.HashTreeRoot(block.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
@@ -58,17 +64,18 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) er
// 1. Validate block, apply state transition and update check points
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)

// Apply state transition on the new block.
if err := s.forkChoiceStore.OnBlock(ctx, block); err != nil {
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
err := errors.Wrap(err, "could not process block from fork choice service")
traceutil.AnnotateError(span, err)
return err
}
root, err := ssz.SigningRoot(block)
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}
@@ -78,36 +85,51 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconB
if err != nil {
return errors.Wrap(err, "could not get head from fork choice service")
}
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
signedHeadBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return errors.Wrap(err, "could not compute state from block head")
}
if signedHeadBlock == nil || signedHeadBlock.Block == nil {
return errors.New("nil head block")
}

// Only save head if it's different than the current head.
if !bytes.Equal(headRoot, s.HeadRoot()) {
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
if err := s.saveHead(ctx, signedHeadBlock, bytesutil.ToBytes32(headRoot)); err != nil {
return errors.Wrap(err, "could not save head")
}
}

// Remove block's contained deposits, attestations, and other operations from persistent storage.
if err := s.cleanupBlockOperations(ctx, block); err != nil {
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: root,
Verified: true,
},
})

// Add attestations from the block to the pool for fork choice.
if err := s.attPool.SaveBlockAttestations(blockCopy.Block.Body.Attestations); err != nil {
log.Errorf("Could not save attestation for fork choice: %v", err)
return nil
}

// Reports on block and fork choice metrics.
s.reportSlotMetrics(block.Slot)
s.reportSlotMetrics(blockCopy.Block.Slot)

// Log if block is a competing block.
isCompetingBlock(root[:], block.Slot, headRoot, headBlk.Slot)
isCompetingBlock(root[:], blockCopy.Block.Slot, headRoot, signedHeadBlock.Block.Slot)

// Log state transition data.
logStateTransitionData(block, root[:])
logStateTransitionData(blockCopy.Block, root[:])

s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances

processedBlkNoPubsub.Inc()

// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(bytesutil.ToBytes32(headRoot))
return nil
}

@@ -115,40 +137,47 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconB
// that are performed on blocks that are received from initial sync service. The operations consist of:
// 1. Validate block, apply state transition and update check points
// 2. Save latest head info
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)

// Apply state transition on the incoming newly received block.
if err := s.forkChoiceStore.OnBlock(ctx, block); err != nil {
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
err := errors.Wrap(err, "could not process block from fork choice service")
traceutil.AnnotateError(span, err)
return err
}
root, err := ssz.SigningRoot(block)
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
}

if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHead(ctx, block, root); err != nil {
if err := s.saveHead(ctx, blockCopy, root); err != nil {
return errors.Wrap(err, "could not save head")
}
}

// Remove block's contained deposits, attestations, and other operations from persistent storage.
if err := s.cleanupBlockOperations(ctx, block); err != nil {
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: root,
Verified: true,
},
})

// Reports on block and fork choice metrics.
s.reportSlotMetrics(block.Slot)
s.reportSlotMetrics(blockCopy.Block.Slot)

// Log state transition data.
logStateTransitionData(block, root[:])
logStateTransitionData(blockCopy.Block, root[:])

s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances

// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(root)
processedBlkNoPubsubForkchoice.Inc()
return nil
}
@@ -156,56 +185,61 @@ func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *eth
// ReceiveBlockNoVerify runs state transition on an input block without verifying the block's BLS contents.
// Depending on the security model, this is the "minimal" work a node can do to sync the chain.
// It simulates light client behavior and assumes 100% trust with the syncing peer.
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
defer span.End()
blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)

// Apply state transition on the incoming newly received block without verifying its BLS contents.
if err := s.forkChoiceStore.OnBlockNoVerifyStateTransition(ctx, block); err != nil {
return errors.Wrap(err, "could not process block from fork choice service")
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
if err := s.forkChoiceStore.OnBlockInitialSyncStateTransition(ctx, blockCopy); err != nil {
return errors.Wrap(err, "could not process blockCopy from fork choice service")
}
root, err := ssz.SigningRoot(block)
root, err := ssz.HashTreeRoot(blockCopy.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root on received block")
return errors.Wrap(err, "could not get signing root on received blockCopy")
}

if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHead(ctx, block, root); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
if featureconfig.Get().InitSyncCacheState {
if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHeadNoDB(ctx, blockCopy, root); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
}
} else {
if !bytes.Equal(root[:], s.HeadRoot()) {
if err := s.saveHead(ctx, blockCopy, root); err != nil {
err := errors.Wrap(err, "could not save head")
traceutil.AnnotateError(span, err)
return err
}
}
}

// Reports on block and fork choice metrics.
s.reportSlotMetrics(block.Slot)
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
BlockRoot: root,
Verified: false,
},
})

// Reports on blockCopy and fork choice metrics.
s.reportSlotMetrics(blockCopy.Block.Slot)

// Log state transition data.
log.WithFields(logrus.Fields{
"slot": block.Slot,
"attestations": len(block.Body.Attestations),
"deposits": len(block.Body.Deposits),
"slot": blockCopy.Block.Slot,
"attestations": len(blockCopy.Block.Body.Attestations),
"deposits": len(blockCopy.Block.Body.Deposits),
}).Debug("Finished applying state transition")

// We write the latest saved head root to a feed for consumption by other services.
s.headUpdatedFeed.Send(root)
return nil
}
s.epochParticipationLock.Lock()
defer s.epochParticipationLock.Unlock()
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances

// cleanupBlockOperations processes and cleans up any block operations relevant to the beacon node
// such as attestations, exits, and deposits. We update the latest seen attestation by validator
// in the local node's runtime, cleanup and remove pending deposits which have been included in the block
// from our node's local cache, and process validator exits and more.
func (s *Service) cleanupBlockOperations(ctx context.Context, block *ethpb.BeaconBlock) error {
// Forward processed block to operation pool to remove individual operation from DB.
if s.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
log.Error("Sent processed block to no subscribers")
}

// Remove pending deposits from the deposit queue.
for _, dep := range block.Body.Deposits {
s.depositCache.RemovePendingDeposit(ctx, dep)
}
return nil
}

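Editorial note, not part of the diff: after this change every block receiver takes a *ethpb.SignedBeaconBlock and clones it before processing. A hedged sketch of the caller side under that new signature (blk and sig are assumed to come from the sync layer):

// Hypothetical caller: regular sync hands a signed block to the chain service.
signed := &ethpb.SignedBeaconBlock{Block: blk, Signature: sig}
if err := s.ReceiveBlockNoPubsub(ctx, signed); err != nil {
	log.WithError(err).Error("Could not process received block")
}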
@@ -6,12 +6,14 @@ import (
"reflect"
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/stateutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -23,89 +25,38 @@ func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
ctx := context.Background()

chainService := setupBeaconChain(t, db)
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
beaconState.Eth1DepositIndex = 100
stateRoot, err := ssz.HashTreeRoot(beaconState)
if err != nil {
t.Fatal(err)
}

genesis := b.NewGenesisBlock(stateRoot[:])
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
genesis, _ := testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot+1)
beaconState, err := state.ExecuteStateTransition(ctx, beaconState, genesis)
if err != nil {
t.Fatal(err)
}

genesisBlkRoot, err := ssz.SigningRoot(genesis)
genesisBlkRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveState(ctx, beaconState, genesisBlkRoot); err != nil {
t.Fatal(err)
}
cp := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
if err := chainService.forkChoiceStore.GenesisStore(ctx, cp, cp); err != nil {
t.Fatal(err)
}

beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
Slot: genesis.Slot,
ParentRoot: genesis.ParentRoot,
BodyRoot: bodyRoot[:],
StateRoot: genesis.StateRoot,
}
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
t.Fatalf("Could not save block to db: %v", err)
}
parentRoot, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatal(err)
}

if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
if err := db.SaveState(ctx, beaconState, genesisBlkRoot); err != nil {
t.Fatal(err)
}

slot := beaconState.Slot + 1
epoch := helpers.SlotToEpoch(slot)
beaconState.Slot++
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, slot)
if err != nil {
t.Fatal(err)
}
beaconState.Slot--

block := &ethpb.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBody{
Eth1Data: &ethpb.Eth1Data{
DepositCount: uint64(len(deposits)),
DepositRoot: []byte("a"),
BlockHash: []byte("b"),
},
RandaoReveal: randaoReveal[:],
Attestations: nil,
},
}

stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
if err != nil {
t.Fatal(err)
}

stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
if err != nil {
t.Fatal(err)
}
block.StateRoot = stateRoot[:]

block, err = testutil.SignBlock(beaconState, block, privKeys)
if err != nil {
t.Error(err)
}

if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
t.Fatal(err)
}
@@ -123,19 +74,26 @@ func TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo(t *testing.T) {

chainService := setupBeaconChain(t, db)

headBlk := &ethpb.BeaconBlock{Slot: 100}
headBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}
if err := db.SaveBlock(ctx, headBlk); err != nil {
t.Fatal(err)
}
r, err := ssz.SigningRoot(headBlk)
r, err := ssz.HashTreeRoot(headBlk.Block)
if err != nil {
t.Fatal(err)
}
head := &pb.BeaconState{Slot: 100, FinalizedCheckpoint: &ethpb.Checkpoint{Root: r[:]}}
if err := db.SaveState(ctx, head, r); err != nil {
t.Fatal(err)
}
chainService.forkChoiceStore = &store{headRoot: r[:]}

if err := chainService.ReceiveBlockNoPubsub(ctx, &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{}}); err != nil {
if err := chainService.ReceiveBlockNoPubsub(ctx, &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{},
},
}); err != nil {
t.Fatal(err)
}

@@ -158,14 +116,17 @@ func TestReceiveReceiveBlockNoPubsub_SameHead(t *testing.T) {

chainService := setupBeaconChain(t, db)

headBlk := &ethpb.BeaconBlock{}
headBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(ctx, headBlk); err != nil {
t.Fatal(err)
}
newBlk := &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{}}
newRoot, _ := ssz.SigningRoot(newBlk)
newBlk := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 1,
Body: &ethpb.BeaconBlockBody{},
},
}
newRoot, _ := ssz.HashTreeRoot(newBlk.Block)
if err := db.SaveBlock(ctx, newBlk); err != nil {
t.Fatal(err)
}
@@ -187,81 +148,40 @@ func TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly(t *testing.T) {
ctx := context.Background()

chainService := setupBeaconChain(t, db)
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{})
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)

block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot)
if err != nil {
t.Fatal(err)
}
beaconState.Eth1DepositIndex = 100
stateRoot, err := ssz.HashTreeRoot(beaconState)

stateRoot, err := stateutil.HashTreeRootState(beaconState)
if err != nil {
t.Fatal(err)
}

genesis := b.NewGenesisBlock(stateRoot[:])
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
parentRoot, err := ssz.HashTreeRoot(genesis.Block)
if err != nil {
t.Fatal(err)
}
if err := chainService.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
t.Fatal(err)
}

beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
Slot: genesis.Slot,
ParentRoot: genesis.ParentRoot,
BodyRoot: bodyRoot[:],
StateRoot: genesis.StateRoot,
}
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
t.Fatalf("Could not save block to db: %v", err)
}
parentRoot, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatal(err)
}

if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
t.Fatal(err)
}

slot := beaconState.Slot + 1
epoch := helpers.SlotToEpoch(slot)
beaconState.Slot++
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
if err != nil {
t.Fatal(err)
}
beaconState.Slot--

block := &ethpb.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBody{
Eth1Data: &ethpb.Eth1Data{
DepositCount: uint64(len(deposits)),
DepositRoot: []byte("a"),
BlockHash: []byte("b"),
},
RandaoReveal: randaoReveal[:],
Attestations: nil,
},
}

stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
if err != nil {
if err := chainService.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}, &ethpb.Checkpoint{Root: parentRoot[:]}); err != nil {
t.Fatal(err)
}

stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
t.Fatalf("Could not save block to db: %v", err)
}

block, err = testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot)
if err != nil {
t.Fatal(err)
}
block.StateRoot = stateRoot[:]

block, err = testutil.SignBlock(beaconState, block, privKeys)
if err != nil {
t.Error(err)
if err := db.SaveState(ctx, beaconState, bytesutil.ToBytes32(block.Block.ParentRoot)); err != nil {
t.Fatal(err)
}

if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {

@@ -11,58 +11,49 @@ import (
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/operations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// ChainFeeds interface defines the methods of the Service which provide state related
// information feeds to consumers.
type ChainFeeds interface {
StateInitializedFeed() *event.Feed
}

// NewHeadNotifier defines a struct which can notify many consumers of a new,
// canonical chain head event occurring in the node.
type NewHeadNotifier interface {
HeadUpdatedFeed() *event.Feed
}

// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
opsPoolService operations.OperationFeeds
forkChoiceStore forkchoice.ForkChoicer
chainStartChan chan time.Time
genesisTime time.Time
stateInitializedFeed *event.Feed
headUpdatedFeed *event.Feed
p2p p2p.Broadcaster
maxRoutines int64
headSlot uint64
headBlock *ethpb.BeaconBlock
headState *pb.BeaconState
canonicalRoots map[uint64][]byte
headLock sync.RWMutex
ctx context.Context
cancel context.CancelFunc
beaconDB db.Database
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
forkChoiceStore forkchoice.ForkChoicer
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int64
headSlot uint64
headBlock *ethpb.SignedBeaconBlock
headState *pb.BeaconState
canonicalRoots map[uint64][]byte
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
epochParticipation map[uint64]*precompute.Balance
epochParticipationLock sync.RWMutex
}

// Config options for the service.
@@ -71,9 +62,10 @@ type Config struct {
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.Database
DepositCache *depositcache.DepositCache
OpsPoolService operations.OperationFeeds
AttPool attestations.Pool
P2p p2p.Broadcaster
MaxRoutines int64
StateNotifier statefeed.Notifier
}

// NewService instantiates a new block service instance that will
@@ -82,19 +74,18 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
store := forkchoice.NewForkChoiceService(ctx, cfg.BeaconDB)
return &Service{
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
opsPoolService: cfg.OpsPoolService,
forkChoiceStore: store,
chainStartChan: make(chan time.Time),
stateInitializedFeed: new(event.Feed),
headUpdatedFeed: new(event.Feed),
p2p: cfg.P2p,
canonicalRoots: make(map[uint64][]byte),
maxRoutines: cfg.MaxRoutines,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
forkChoiceStore: store,
p2p: cfg.P2p,
canonicalRoots: make(map[uint64][]byte),
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
epochParticipation: make(map[uint64]*precompute.Balance),
}, nil
}

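Editorial note, not part of the diff: the Config above drops OpsPoolService and the chain-start channel in favour of an attestation pool and a state notifier. A hedged sketch of how a node might wire the service after this change; beaconDB, depositCache, powchainService, p2pService, and beaconNode are hypothetical, and attestations.NewPool is assumed to be the pool constructor exported by the package imported above:

// Hypothetical wiring reflecting the new Config fields.
chainService, err := blockchain.NewService(ctx, &blockchain.Config{
	BeaconDB:          beaconDB,
	DepositCache:      depositCache,
	ChainStartFetcher: powchainService,
	AttPool:           attestations.NewPool(),
	P2p:               p2pService,
	MaxRoutines:       10,
	StateNotifier:     beaconNode, // anything implementing statefeed.Notifier
})
if err != nil {
	log.Fatalf("Could not create chain service: %v", err)
}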
@@ -105,6 +96,23 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}

// For running initial sync with state cache, in an event of restart, we use
// last finalized check point as start point to sync instead of head
// state. This is because we no longer save state every slot during sync.
if featureconfig.Get().InitSyncCacheState {
cp, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if beaconState == nil {
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
log.Fatalf("Could not fetch beacon state: %v", err)
}
}
}

// If the chain has already been initialized, simply start the block processing routine.
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
@@ -123,31 +131,58 @@ func (s *Service) Start() {
if err := s.forkChoiceStore.GenesisStore(ctx, justifiedCheckpoint, finalizedCheckpoint); err != nil {
log.Fatalf("Could not start fork choice service: %v", err)
}
s.stateInitializedFeed.Send(s.genesisTime)
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
},
})
} else {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.chainStartFetcher == nil {
log.Fatal("Not configured web3Service for POW chain")
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
}
subChainStart := s.chainStartFetcher.ChainStartFeed().Subscribe(s.chainStartChan)
go func() {
genesisTime := <-s.chainStartChan
s.processChainStartTime(ctx, genesisTime, subChainStart)
return
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.ChainStarted {
data := event.Data.(*statefeed.ChainStartedData)
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
s.processChainStartTime(ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
return
}
}
}()
}

go s.processAttestation()
}

// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time, chainStartSub event.Subscription) {
initialDeposits := s.chainStartFetcher.ChainStartDeposits()
if err := s.initializeBeaconChain(ctx, genesisTime, initialDeposits, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.chainStartFetcher.PreGenesisState()
if err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
s.stateInitializedFeed.Send(genesisTime)
chainStartSub.Unsubscribe()
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
},
})
}

// initializes the state and genesis block of the beacon chain to persistent storage
@@ -156,7 +191,7 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
deposits []*ethpb.Deposit,
preGenesisState *pb.BeaconState,
eth1data *ethpb.Eth1Data) error {
_, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain")
defer span.End()
@@ -164,7 +199,7 @@ func (s *Service) initializeBeaconChain(
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())

genesisState, err := state.GenesisBeaconState(deposits, unixTime, eth1data)
genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return errors.Wrap(err, "could not initialize genesis state")
}
@@ -175,7 +210,7 @@ func (s *Service) initializeBeaconChain(

// Update committee shuffled indices for genesis epoch.
if featureconfig.Get().EnableNewCache {
if err := helpers.UpdateCommitteeCache(genesisState); err != nil {
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return err
}
}
@@ -198,30 +233,48 @@ func (s *Service) Status() error {
return nil
}

// StateInitializedFeed returns a feed that is written to
// when the beacon state is first initialized.
func (s *Service) StateInitializedFeed() *event.Feed {
return s.stateInitializedFeed
}

// HeadUpdatedFeed is a feed containing the head block root and
// is written to when a new head block is saved to DB.
func (s *Service) HeadUpdatedFeed() *event.Feed {
return s.headUpdatedFeed
}

// This gets called to update canonical root mapping.
func (s *Service) saveHead(ctx context.Context, b *ethpb.BeaconBlock, r [32]byte) error {
func (s *Service) saveHead(ctx context.Context, signed *ethpb.SignedBeaconBlock, r [32]byte) error {
s.headLock.Lock()
defer s.headLock.Unlock()

s.headSlot = b.Slot
if signed == nil || signed.Block == nil {
return errors.New("cannot save nil head block")
}

s.canonicalRoots[b.Slot] = r[:]
s.headSlot = signed.Block.Slot

s.canonicalRoots[signed.Block.Slot] = r[:]

if err := s.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
s.headBlock = signed

headState, err := s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
s.headState = headState

log.WithFields(logrus.Fields{
"slot": signed.Block.Slot,
"headRoot": fmt.Sprintf("%#x", r),
}).Debug("Saved new head info")
return nil
}

// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte) error {
s.headLock.Lock()
defer s.headLock.Unlock()

s.headSlot = b.Block.Slot

s.canonicalRoots[b.Block.Slot] = r[:]

s.headBlock = b

headState, err := s.beaconDB.State(ctx, r)
@@ -231,7 +284,7 @@ func (s *Service) saveHead(ctx context.Context, b *ethpb.BeaconBlock, r [32]byte
s.headState = headState

log.WithFields(logrus.Fields{
"slot": b.Slot,
"slot": b.Block.Slot,
"headRoot": fmt.Sprintf("%#x", r),
}).Debug("Saved new head info")
return nil
@@ -257,7 +310,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconSt
return errors.Wrap(err, "could not tree hash genesis state")
}
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}
@@ -265,15 +318,15 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconSt
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could save genesis block root")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.saveGenesisValidators(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis validators")
}
@@ -283,6 +336,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconSt
return errors.Wrap(err, "Could not start fork choice service: %v")
}

s.genesisRoot = genesisBlkRoot
s.headBlock = genesisBlk
s.headState = genesisState
s.canonicalRoots[genesisState.Slot] = genesisBlkRoot[:]
@@ -295,6 +349,19 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
s.headLock.Lock()
defer s.headLock.Unlock()

genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if genesisBlock == nil {
return errors.New("no genesis block in db")
}
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
if err != nil {
return errors.Wrap(err, "could not get signing root of genesis block")
}
s.genesisRoot = genesisBlkRoot

finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint from db")
@@ -313,7 +380,9 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return errors.Wrap(err, "could not get finalized block from db")
}

s.headSlot = s.headState.Slot
if s.headBlock != nil && s.headBlock.Block != nil {
s.headSlot = s.headBlock.Block.Slot
}
s.canonicalRoots[s.headSlot] = finalized.Root

return nil

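Editorial note, not part of the diff: with StateInitializedFeed removed, consumers are expected to wait on the shared state feed instead. A hedged sketch of a consumer under the statefeed types used above (stateNotifier is an assumed handle implementing statefeed.Notifier):

// Hypothetical consumer replacing an old StateInitializedFeed subscription.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for ev := range stateChannel {
	if ev.Type == statefeed.Initialized {
		data := ev.Data.(*statefeed.InitializedData)
		log.WithField("startTime", data.StartTime).Info("Beacon state initialized")
		break
	}
}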
@@ -5,8 +5,8 @@ import (
"io/ioutil"
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/sirupsen/logrus"
)

@@ -25,13 +25,13 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
go func() {
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 777},
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 777}},
[32]byte{},
)
}()
s.saveHead(
context.Background(),
&ethpb.BeaconBlock{Slot: 888},
&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 888}},
[32]byte{},
)
}

@@ -4,27 +4,27 @@ import (
"bytes"
"context"
"encoding/hex"
"errors"
"io/ioutil"
"math/big"
"reflect"
"testing"
"time"

ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
ssz "github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -33,10 +33,6 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)

// Ensure Service implements interfaces.
var _ = ChainFeeds(&Service{})
var _ = NewHeadNotifier(&Service{})

func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
@@ -46,16 +42,16 @@ type store struct {
headRoot []byte
}

func (s *store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
func (s *store) OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) error {
return nil
}

func (s *store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
func (s *store) OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) error {
return nil
}

func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
return 0, nil
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
return nil
}

func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
@@ -70,105 +66,16 @@ func (s *store) Head(ctx context.Context) ([]byte, error) {
return s.headRoot, nil
}

type mockOperationService struct{}

func (ms *mockOperationService) IncomingProcessedBlockFeed() *event.Feed {
return new(event.Feed)
type mockBeaconNode struct {
stateFeed *event.Feed
}

func (ms *mockOperationService) IncomingAttFeed() *event.Feed {
return nil
}

func (ms *mockOperationService) IncomingExitFeed() *event.Feed {
return nil
}

type mockClient struct{}

func (m *mockClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}

func (m *mockClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
return gethTypes.NewBlockWithHeader(head), nil
}

func (m *mockClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
return gethTypes.NewBlockWithHeader(head), nil
}

func (m *mockClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
return &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}, nil
}

func (m *mockClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}

func (m *mockClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
return []byte{'t', 'e', 's', 't'}, nil
}

func (m *mockClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
return []byte{'t', 'e', 's', 't'}, nil
}

func (m *mockClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
logs := make([]gethTypes.Log, 3)
for i := 0; i < len(logs); i++ {
logs[i].Address = common.Address{}
logs[i].Topics = make([]common.Hash, 5)
logs[i].Topics[0] = common.Hash{'a'}
logs[i].Topics[1] = common.Hash{'b'}
logs[i].Topics[2] = common.Hash{'c'}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return logs, nil
}

func (m *mockClient) LatestBlockHash() common.Hash {
return common.BytesToHash([]byte{'A'})
}

type faultyClient struct{}

func (f *faultyClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}

func (f *faultyClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
return nil, errors.New("failed")
}

func (f *faultyClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
return nil, errors.New("failed")
}

func (f *faultyClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
return nil, errors.New("failed")
}

func (f *faultyClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
return new(event.Feed).Subscribe(ch), nil
}

func (f *faultyClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
return nil, errors.New("unable to retrieve logs")
}

func (f *faultyClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
return []byte{}, errors.New("unable to retrieve contract code")
}

func (f *faultyClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
return []byte{}, errors.New("unable to retrieve contract code")
}

func (f *faultyClient) LatestBlockHash() common.Hash {
return common.BytesToHash([]byte{'A'})
return mbn.stateFeed
}

type mockBroadcaster struct {
@@ -182,30 +89,15 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {

var _ = p2p.Broadcaster(&mockBroadcaster{})

func setupGenesisBlock(t *testing.T, cs *Service) ([32]byte, *ethpb.BeaconBlock) {
genesis := b.NewGenesisBlock([]byte{})
if err := cs.beaconDB.SaveBlock(context.Background(), genesis); err != nil {
t.Fatalf("could not save block to db: %v", err)
}
parentHash, err := ssz.SigningRoot(genesis)
if err != nil {
t.Fatalf("unable to get tree hash root of canonical head: %v", err)
}
return parentHash, genesis
}

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
endpoint := "ws://127.0.0.1"
|
||||
ctx := context.Background()
|
||||
var web3Service *powchain.Service
|
||||
var err error
|
||||
client := &mockClient{}
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
Endpoint: endpoint,
|
||||
BeaconDB: beaconDB,
|
||||
ETH1Endpoint: endpoint,
|
||||
DepositContract: common.Address{},
|
||||
Reader: client,
|
||||
Client: client,
|
||||
Logger: client,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to set up web3 service: %v", err)
|
||||
@@ -216,8 +108,9 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
BeaconDB: beaconDB,
|
||||
DepositCache: depositcache.NewDepositCache(),
|
||||
ChainStartFetcher: web3Service,
|
||||
OpsPoolService: &mockOperationService{},
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not register blockchain service: %v", err)
|
||||
@@ -226,31 +119,47 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
if err != nil {
|
||||
t.Fatalf("unable to setup chain service: %v", err)
|
||||
}
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
return chainService
|
||||
}
|
||||
|
||||
func TestChainStartStop_Uninitialized(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
// Test the start function.
|
||||
genesisChan := make(chan time.Time, 0)
|
||||
sub := chainService.stateInitializedFeed.Subscribe(genesisChan)
|
||||
defer sub.Unsubscribe()
|
||||
// Listen for state events.
|
||||
stateSubChannel := make(chan *feed.Event, 1)
|
||||
stateSub := chainService.stateNotifier.StateFeed().Subscribe(stateSubChannel)
|
||||
|
||||
// Test the chain start state notifier.
|
||||
genesisTime := time.Unix(1, 0)
|
||||
chainService.Start()
|
||||
chainService.chainStartChan <- time.Unix(0, 0)
|
||||
genesisTime := <-genesisChan
|
||||
if genesisTime != time.Unix(0, 0) {
|
||||
t.Errorf(
|
||||
"Expected genesis time to equal chainstart time (%v), received %v",
|
||||
time.Unix(0, 0),
|
||||
genesisTime,
|
||||
)
|
||||
event := &feed.Event{
|
||||
Type: statefeed.ChainStarted,
|
||||
Data: &statefeed.ChainStartedData{
|
||||
StartTime: genesisTime,
|
||||
},
|
||||
}
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 1; sent == 1; {
|
||||
sent = chainService.stateNotifier.StateFeed().Send(event)
|
||||
if sent == 1 {
|
||||
// Flush our local subscriber.
|
||||
<-stateSubChannel
|
||||
}
|
||||
}
|
||||
|
||||
// Now wait for notification the state is ready.
|
||||
for stateInitialized := false; stateInitialized == false; {
|
||||
recv := <-stateSubChannel
|
||||
if recv.Type == statefeed.Initialized {
|
||||
stateInitialized = true
|
||||
}
|
||||
}
|
||||
stateSub.Unsubscribe()
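// Note: event.Feed.Send returns the number of subscribers the event was delivered to,
// so the send loop above keeps re-sending ChainStarted while only the local test
// subscriber (count == 1) is attached and exits once the service under test has
// subscribed as well. A minimal sketch of that property (illustrative values only):
//
//	f := new(event.Feed)
//	ch := make(chan int, 1)
//	sub := f.Subscribe(ch)
//	defer sub.Unsubscribe()
//	delivered := f.Send(42) // delivered == 1: exactly one subscriber received the value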
|
||||
|
||||
beaconState, err := db.HeadState(context.Background())
|
||||
if err != nil {
|
||||
@@ -279,22 +188,22 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
genesisBlk := b.NewGenesisBlock([]byte{})
|
||||
blkRoot, err := ssz.SigningRoot(genesisBlk)
|
||||
blkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -319,11 +228,28 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
bc := setupBeaconChain(t, db)
|
||||
var err error
|
||||
|
||||
// Set up 10 deposits pre chain start for validators to register
|
||||
count := uint64(10)
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, count)
|
||||
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), deposits, &ethpb.Eth1Data{}); err != nil {
|
||||
deposits, _, _ := testutil.DeterministicDepositsAndKeys(count)
|
||||
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hashTreeRoot := trie.HashTreeRoot()
|
||||
genState := state.EmptyGenesisState()
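// Note: ProcessDeposits below is expected to verify each deposit's Merkle proof against
// the state's Eth1Data.DepositRoot, which is why the deposit trie root computed above is
// written into the genesis state before the deposits are processed.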
|
||||
genState.Eth1Data = &ethpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
DepositCount: uint64(len(deposits)),
|
||||
}
|
||||
genState, err = b.ProcessDeposits(ctx, genState, &ethpb.BeaconBlockBody{Deposits: deposits})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -338,8 +264,8 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
if bc.HeadState() == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
if _, err := bc.HeadState(ctx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if bc.HeadBlock() == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
@@ -354,13 +280,28 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, genesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := &ethpb.BeaconBlock{Slot: finalizedSlot}
|
||||
headBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}}
|
||||
headState := &pb.BeaconState{Slot: finalizedSlot}
|
||||
headRoot, _ := ssz.SigningRoot(headBlock)
|
||||
headRoot, _ := ssz.HashTreeRoot(headBlock.Block)
|
||||
if err := db.SaveState(ctx, headState, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, headBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
|
||||
Epoch: helpers.SlotToEpoch(finalizedSlot),
|
||||
Root: headRoot[:],
|
||||
@@ -377,13 +318,43 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
|
||||
t.Error("head block incorrect")
|
||||
}
|
||||
if !reflect.DeepEqual(c.HeadState(), headState) {
|
||||
t.Error("head block incorrect")
|
||||
s, err := c.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if headBlock.Slot != c.HeadSlot() {
|
||||
if !reflect.DeepEqual(s, headState) {
|
||||
t.Error("head state incorrect")
|
||||
}
|
||||
if headBlock.Block.Slot != c.HeadSlot() {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if !bytes.Equal(headRoot[:], c.HeadRoot()) {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if c.genesisRoot != genesisRoot {
|
||||
t.Error("genesis block root incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
if err := s.saveHeadNoDB(ctx, b, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newB, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reflect.DeepEqual(newB, b) {
|
||||
t.Error("head block should not be equal")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,8 +7,17 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,52 +1,128 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State *pb.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.BeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
StateFeed *event.Feed
|
||||
BlocksReceived []*ethpb.BeaconBlock
|
||||
Genesis time.Time
|
||||
State *pb.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.SignedBeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []*ethpb.SignedBeaconBlock
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
Fork *pb.Fork
|
||||
DB db.Database
|
||||
stateNotifier statefeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if ms.stateNotifier == nil {
|
||||
ms.stateNotifier = &MockStateNotifier{}
|
||||
}
|
||||
return ms.stateNotifier
|
||||
}
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
type MockStateNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if ms.opNotifier == nil {
|
||||
ms.opNotifier = &MockOperationNotifier{}
|
||||
}
|
||||
return ms.opNotifier
|
||||
}
|
||||
|
||||
// MockOperationNotifier mocks the operation notifier.
|
||||
type MockOperationNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// OperationFeed returns an operation feed.
|
||||
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
if mon.feed == nil {
|
||||
mon.feed = new(event.Feed)
|
||||
}
|
||||
return mon.feed
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &pb.BeaconState{}
|
||||
}
|
||||
ms.State.Slot = block.Slot
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
}
|
||||
ms.State.Slot = block.Block.Slot
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := ssz.HashTreeRoot(block.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (ms *ChainService) HeadSlot() uint64 {
|
||||
if ms.State == nil {
|
||||
return 0
|
||||
}
|
||||
return ms.State.Slot
|
||||
|
||||
}
|
||||
@@ -58,13 +134,18 @@ func (ms *ChainService) HeadRoot() []byte {
|
||||
}
|
||||
|
||||
// HeadBlock mocks HeadBlock method in chain service.
|
||||
func (ms *ChainService) HeadBlock() *ethpb.BeaconBlock {
|
||||
func (ms *ChainService) HeadBlock() *ethpb.SignedBeaconBlock {
|
||||
return ms.Block
|
||||
}
|
||||
|
||||
// HeadState mocks HeadState method in chain service.
|
||||
func (ms *ChainService) HeadState() *pb.BeaconState {
|
||||
return ms.State
|
||||
func (ms *ChainService) HeadState(context.Context) (*pb.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
}
|
||||
|
||||
// CurrentFork mocks CurrentFork method in chain service.
|
||||
func (ms *ChainService) CurrentFork() *pb.Fork {
|
||||
return ms.Fork
|
||||
}
|
||||
|
||||
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
|
||||
@@ -72,6 +153,16 @@ func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.FinalizedCheckPoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.CurrentJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.PreviousJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
@@ -82,21 +173,25 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
|
||||
return nil
|
||||
}
|
||||
|
||||
// StateInitializedFeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateInitializedFeed() *event.Feed {
|
||||
if ms.StateFeed != nil {
|
||||
return ms.StateFeed
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
ms.StateFeed = new(event.Feed)
|
||||
return ms.StateFeed
|
||||
return helpers.ActiveValidatorIndices(ms.State, epoch)
|
||||
}
|
||||
|
||||
// HeadUpdatedFeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadUpdatedFeed() *event.Feed {
|
||||
return new(event.Feed)
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// GenesisTime mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisTime() time.Time {
|
||||
return ms.Genesis
|
||||
}
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
}
|
||||
|
||||
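For orientation, the sketch below shows how a test in another package might drive the mock ChainService defined above through its state feed. It is a minimal, hedged example: the test name, variable names and the import path of the core feed package are assumptions, while ChainService, StateNotifier, MockStateNotifier and the statefeed event types come from the code in this diff.

package blockchain_test

import (
    "testing"
    "time"

    mocktesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" // assumed import path for the feed.Event type
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
)

func TestMockChainService_DeliversChainStarted(t *testing.T) {
    chainService := &mocktesting.ChainService{Genesis: time.Now()}

    // Subscribe to the mock's state feed exactly as production code would.
    stateChannel := make(chan *feed.Event, 1)
    stateSub := chainService.StateNotifier().StateFeed().Subscribe(stateChannel)
    defer stateSub.Unsubscribe()

    // Fire a ChainStarted event through the mock and read it back.
    chainService.StateNotifier().StateFeed().Send(&feed.Event{
        Type: statefeed.ChainStarted,
        Data: &statefeed.ChainStartedData{StartTime: chainService.Genesis},
    })
    if evt := <-stateChannel; evt.Type != statefeed.ChainStarted {
        t.Errorf("unexpected event type: %v", evt.Type)
    }
}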
14
beacon-chain/cache/BUILD.bazel
vendored
@@ -3,21 +3,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"active_count.go",
|
||||
"active_indices.go",
|
||||
"attestation_data.go",
|
||||
"checkpoint_state.go",
|
||||
"committee.go",
|
||||
"common.go",
|
||||
"eth1_data.go",
|
||||
"shuffled_indices.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
@@ -25,6 +20,7 @@ go_library(
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -33,25 +29,21 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"active_count_test.go",
|
||||
"active_indices_test.go",
|
||||
"attestation_data_test.go",
|
||||
"benchmarks_test.go",
|
||||
"checkpoint_state_test.go",
|
||||
"committee_test.go",
|
||||
"eth1_data_test.go",
|
||||
"feature_flag_test.go",
|
||||
"shuffled_indices_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
race = "on",
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
98
beacon-chain/cache/active_count.go
vendored
@@ -1,98 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotActiveCountInfo will be returned when a cache object is not a pointer to
|
||||
// an ActiveCountByEpoch struct.
|
||||
ErrNotActiveCountInfo = errors.New("object is not a active count obj")
|
||||
|
||||
// maxActiveCountListSize defines the max number of active counts the cache can hold.
|
||||
maxActiveCountListSize = 1000
|
||||
|
||||
// Metrics.
|
||||
activeCountCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_count_cache_miss",
|
||||
Help: "The number of active validator count requests that aren't present in the cache.",
|
||||
})
|
||||
activeCountCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_count_cache_hit",
|
||||
Help: "The number of active validator count requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ActiveCountByEpoch defines the active validator count per epoch.
|
||||
type ActiveCountByEpoch struct {
|
||||
Epoch uint64
|
||||
ActiveCount uint64
|
||||
}
|
||||
|
||||
// ActiveCountCache is a struct with 1 queue for looking up active count by epoch.
|
||||
type ActiveCountCache struct {
|
||||
activeCountCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// activeCountKeyFn takes the epoch as the key for the active count of a given epoch.
|
||||
func activeCountKeyFn(obj interface{}) (string, error) {
|
||||
aInfo, ok := obj.(*ActiveCountByEpoch)
|
||||
if !ok {
|
||||
return "", ErrNotActiveCountInfo
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(aInfo.Epoch)), nil
|
||||
}
|
||||
|
||||
// NewActiveCountCache creates a new active count cache for storing/accessing active validator count.
|
||||
func NewActiveCountCache() *ActiveCountCache {
|
||||
return &ActiveCountCache{
|
||||
activeCountCache: cache.NewFIFO(activeCountKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ActiveCountInEpoch fetches ActiveCountByEpoch by epoch. Returns true with a
|
||||
// reference to the ActiveCountInEpoch info, if exists. Otherwise returns false, nil.
|
||||
func (c *ActiveCountCache) ActiveCountInEpoch(epoch uint64) (uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.activeCountCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return params.BeaconConfig().FarFutureEpoch, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
activeCountCacheHit.Inc()
|
||||
} else {
|
||||
activeCountCacheMiss.Inc()
|
||||
return params.BeaconConfig().FarFutureEpoch, nil
|
||||
}
|
||||
|
||||
aInfo, ok := obj.(*ActiveCountByEpoch)
|
||||
if !ok {
|
||||
return params.BeaconConfig().FarFutureEpoch, ErrNotActiveCountInfo
|
||||
}
|
||||
|
||||
return aInfo.ActiveCount, nil
|
||||
}
|
||||
|
||||
// AddActiveCount adds ActiveCountByEpoch object to the cache. This method also trims the least
|
||||
// recently added ActiveCountByEpoch object if the cache size has reached the max cache size limit.
|
||||
func (c *ActiveCountCache) AddActiveCount(activeCount *ActiveCountByEpoch) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.activeCountCache.AddIfNotPresent(activeCount); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.activeCountCache, maxActiveCountListSize)
|
||||
return nil
|
||||
}
|
||||
83
beacon-chain/cache/active_count_test.go
vendored
@@ -1,83 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestActiveCountKeyFn_OK(t *testing.T) {
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: 999,
|
||||
ActiveCount: 10,
|
||||
}
|
||||
|
||||
key, err := activeCountKeyFn(aInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(aInfo.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCountKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := activeCountKeyFn("bad")
|
||||
if err != ErrNotActiveCountInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotActiveCountInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCountCache_ActiveCountByEpoch(t *testing.T) {
|
||||
cache := NewActiveCountCache()
|
||||
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: 99,
|
||||
ActiveCount: 11,
|
||||
}
|
||||
activeCount, err := cache.ActiveCountInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if activeCount != params.BeaconConfig().FarFutureEpoch {
|
||||
t.Error("Expected active count not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddActiveCount(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
activeCount, err = cache.ActiveCountInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(activeCount, aInfo.ActiveCount) {
|
||||
t.Errorf(
|
||||
"Expected fetched active count to be %v, got %v",
|
||||
aInfo.ActiveCount,
|
||||
activeCount,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCount_MaxSize(t *testing.T) {
|
||||
cache := NewActiveCountCache()
|
||||
|
||||
for i := uint64(0); i < 1001; i++ {
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: i,
|
||||
}
|
||||
if err := cache.AddActiveCount(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.activeCountCache.ListKeys()) != maxActiveCountListSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxActiveCountListSize,
|
||||
len(cache.activeCountCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
102
beacon-chain/cache/active_indices.go
vendored
@@ -1,102 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotActiveIndicesInfo will be returned when a cache object is not a pointer to
|
||||
// an ActiveIndicesByEpoch struct.
|
||||
ErrNotActiveIndicesInfo = errors.New("object is not a active indices list")
|
||||
|
||||
// maxActiveIndicesListSize defines the max number of active indices lists the cache can hold.
|
||||
maxActiveIndicesListSize = 4
|
||||
|
||||
// Metrics.
|
||||
activeIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_indices_cache_miss",
|
||||
Help: "The number of active validator indices requests that aren't present in the cache.",
|
||||
})
|
||||
activeIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_indices_cache_hit",
|
||||
Help: "The number of active validator indices requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ActiveIndicesByEpoch defines the active validator indices per epoch.
|
||||
type ActiveIndicesByEpoch struct {
|
||||
Epoch uint64
|
||||
ActiveIndices []uint64
|
||||
}
|
||||
|
||||
// ActiveIndicesCache is a struct with 1 queue for looking up active indices by epoch.
|
||||
type ActiveIndicesCache struct {
|
||||
activeIndicesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// activeIndicesKeyFn takes the epoch as the key for the active indices of a given epoch.
|
||||
func activeIndicesKeyFn(obj interface{}) (string, error) {
|
||||
aInfo, ok := obj.(*ActiveIndicesByEpoch)
|
||||
if !ok {
|
||||
return "", ErrNotActiveIndicesInfo
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(aInfo.Epoch)), nil
|
||||
}
|
||||
|
||||
// NewActiveIndicesCache creates a new active indices cache for storing/accessing active validator indices.
|
||||
func NewActiveIndicesCache() *ActiveIndicesCache {
|
||||
return &ActiveIndicesCache{
|
||||
activeIndicesCache: cache.NewFIFO(activeIndicesKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ActiveIndicesInEpoch fetches ActiveIndicesByEpoch by epoch. Returns true with a
|
||||
// reference to the ActiveIndicesInEpoch info, if exists. Otherwise returns false, nil.
|
||||
func (c *ActiveIndicesCache) ActiveIndicesInEpoch(epoch uint64) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.activeIndicesCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
activeIndicesCacheHit.Inc()
|
||||
} else {
|
||||
activeIndicesCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
aInfo, ok := obj.(*ActiveIndicesByEpoch)
|
||||
if !ok {
|
||||
return nil, ErrNotActiveIndicesInfo
|
||||
}
|
||||
|
||||
return aInfo.ActiveIndices, nil
|
||||
}
|
||||
|
||||
// AddActiveIndicesList adds ActiveIndicesByEpoch object to the cache. This method also trims the least
|
||||
// recently added ActiveIndicesByEpoch object if the cache size has reached the max cache size limit.
|
||||
func (c *ActiveIndicesCache) AddActiveIndicesList(activeIndices *ActiveIndicesByEpoch) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.activeIndicesCache.AddIfNotPresent(activeIndices); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.activeIndicesCache, maxActiveIndicesListSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndicesKeys returns the keys of the active indices cache.
|
||||
func (c *ActiveIndicesCache) ActiveIndicesKeys() []string {
|
||||
return c.activeIndicesCache.ListKeys()
|
||||
}
|
||||
82
beacon-chain/cache/active_indices_test.go
vendored
@@ -1,82 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestActiveIndicesKeyFn_OK(t *testing.T) {
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: 999,
|
||||
ActiveIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := activeIndicesKeyFn(aInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(aInfo.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndicesKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := activeIndicesKeyFn("bad")
|
||||
if err != ErrNotActiveIndicesInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotActiveIndicesInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndicesCache_ActiveIndicesByEpoch(t *testing.T) {
|
||||
cache := NewActiveIndicesCache()
|
||||
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: 99,
|
||||
ActiveIndices: []uint64{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
activeIndices, err := cache.ActiveIndicesInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if activeIndices != nil {
|
||||
t.Error("Expected active indices not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddActiveIndicesList(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
activeIndices, err = cache.ActiveIndicesInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(activeIndices, aInfo.ActiveIndices) {
|
||||
t.Errorf(
|
||||
"Expected fetched active indices to be %v, got %v",
|
||||
aInfo.ActiveIndices,
|
||||
activeIndices,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndices_MaxSize(t *testing.T) {
|
||||
cache := NewActiveIndicesCache()
|
||||
|
||||
for i := uint64(0); i < 100; i++ {
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: i,
|
||||
}
|
||||
if err := cache.AddActiveIndicesList(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.activeIndicesCache.ListKeys()) != maxActiveIndicesListSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxActiveIndicesListSize,
|
||||
len(cache.activeIndicesCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
17
beacon-chain/cache/attestation_data.go
vendored
@@ -10,8 +10,7 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
@@ -59,7 +58,7 @@ func NewAttestationCache() *AttestationCache {
|
||||
|
||||
// Get waits for any in progress calculation to complete before returning a
|
||||
// cached response, if any.
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest) (*ethpb.AttestationData, error) {
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
// Return a miss result if cache is not enabled.
|
||||
attestationCacheMiss.Inc()
|
||||
@@ -113,7 +112,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest)
|
||||
|
||||
// MarkInProgress marks a request as in progress so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
|
||||
func (c *AttestationCache) MarkInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
@@ -135,7 +134,7 @@ func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
|
||||
func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
@@ -151,7 +150,7 @@ func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *pb.AttestationRequest, res *ethpb.AttestationData) error {
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
@@ -180,11 +179,11 @@ func wrapperToKey(i interface{}) (string, error) {
|
||||
return reqToKey(w.req)
|
||||
}
|
||||
|
||||
func reqToKey(req *pb.AttestationRequest) (string, error) {
|
||||
return fmt.Sprintf("%d-%d", req.Shard, req.Slot), nil
|
||||
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
|
||||
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
|
||||
}
|
||||
|
||||
type attestationReqResWrapper struct {
|
||||
req *pb.AttestationRequest
|
||||
req *ethpb.AttestationDataRequest
|
||||
res *ethpb.AttestationData
|
||||
}
|
||||
|
||||
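To show how the renamed request type plugs into the cache, here is a hedged sketch of the single-flight pattern the comments above describe. The wrapper function, its compute callback and the error handling are illustrative only; Get, MarkInProgress, MarkNotInProgress and Put are the methods from the file above, and all of them behave as no-ops (Get reports a miss) when the attestation-cache feature flag is disabled.

package cache_test

import (
    "context"

    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// attestationDataCached wraps a slow computation with the cache's single-flight protocol.
func attestationDataCached(ctx context.Context, c *cache.AttestationCache, req *ethpb.AttestationDataRequest,
    compute func() (*ethpb.AttestationData, error)) (*ethpb.AttestationData, error) {
    // Get waits for any in-progress computation of a similar request and returns
    // its cached result; a nil result means a cache miss.
    if res, err := c.Get(ctx, req); err != nil {
        return nil, err
    } else if res != nil {
        return res, nil
    }

    // Mark the request in progress so concurrent callers block on Get instead of
    // recomputing, then release the mark once the result has been stored.
    if err := c.MarkInProgress(req); err != nil {
        return nil, err
    }
    defer func() {
        _ = c.MarkNotInProgress(req)
    }()

    res, err := compute()
    if err != nil {
        return nil, err
    }
    if err := c.Put(ctx, req, res); err != nil {
        return nil, err
    }
    return res, nil
}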
9
beacon-chain/cache/attestation_data_test.go
vendored
@@ -5,18 +5,17 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
func TestAttestationCache_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := cache.NewAttestationCache()
|
||||
|
||||
req := &pb.AttestationRequest{
|
||||
Shard: 0,
|
||||
Slot: 1,
|
||||
req := &ethpb.AttestationDataRequest{
|
||||
CommitteeIndex: 0,
|
||||
Slot: 1,
|
||||
}
|
||||
|
||||
response, err := c.Get(ctx, req)
|
||||
|
||||
45
beacon-chain/cache/benchmarks_test.go
vendored
@@ -1,45 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var indices300k = createIndices(300000)
|
||||
var epoch = uint64(1)
|
||||
|
||||
func createIndices(count int) *ActiveIndicesByEpoch {
|
||||
indices := make([]uint64, 0, count)
|
||||
for i := 0; i < count; i++ {
|
||||
indices = append(indices, uint64(i))
|
||||
}
|
||||
return &ActiveIndicesByEpoch{
|
||||
Epoch: epoch,
|
||||
ActiveIndices: indices,
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCachingAddRetrieve(b *testing.B) {
|
||||
|
||||
c := NewActiveIndicesCache()
|
||||
|
||||
b.Run("ADD300K", func(b *testing.B) {
|
||||
b.N = 10
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := c.AddActiveIndicesList(indices300k); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("RETR300K", func(b *testing.B) {
|
||||
b.N = 10
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := c.ActiveIndicesInEpoch(epoch); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
2
beacon-chain/cache/checkpoint_state.go
vendored
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
2
beacon-chain/cache/checkpoint_state_test.go
vendored
@@ -4,8 +4,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
)
|
||||
|
||||
|
||||
182
beacon-chain/cache/committee.go
vendored
@@ -2,11 +2,11 @@ package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
@@ -17,9 +17,10 @@ var (
|
||||
// a Committee struct.
|
||||
ErrNotCommittee = errors.New("object is not a committee struct")
|
||||
|
||||
// maxShuffledIndicesSize defines the max number of shuffled indices list can cache.
|
||||
// 3 for previous, current epoch and next epoch.
|
||||
maxShuffledIndicesSize = 3
|
||||
// maxCommitteesCacheSize defines the max number of shuffled committee lists the cache can hold, keyed per RANDAO seed.
|
||||
// Due to reorgs, it's good to keep the old cache around for a quick switch-over. 10 is a generous
|
||||
// cache size as it considers 3 concurrent branches over 3 epochs.
|
||||
maxCommitteesCacheSize = 10
|
||||
|
||||
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
|
||||
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
@@ -33,43 +34,47 @@ var (
|
||||
})
|
||||
)
|
||||
|
||||
// Committee defines the committee per epoch and shard.
|
||||
type Committee struct {
|
||||
StartShard uint64
|
||||
CommitteeCount uint64
|
||||
Epoch uint64
|
||||
Committee []uint64
|
||||
// Committees defines the shuffled committees for a given seed.
|
||||
type Committees struct {
|
||||
CommitteeCount uint64
|
||||
Seed [32]byte
|
||||
ShuffledIndices []uint64
|
||||
SortedIndices []uint64
|
||||
}
|
||||
|
||||
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by epoch and shard.
|
||||
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
|
||||
type CommitteeCache struct {
|
||||
CommitteeCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// committeeKeyFn takes the epoch as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
func committeeKeyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*Committee)
|
||||
info, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return "", ErrNotCommittee
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(info.Epoch)), nil
|
||||
return key(info.Seed), nil
|
||||
}
|
||||
|
||||
// NewCommitteeCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteeCache() *CommitteeCache {
|
||||
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteesCache() *CommitteeCache {
|
||||
return &CommitteeCache{
|
||||
CommitteeCache: cache.NewFIFO(committeeKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ShuffledIndices fetches the shuffled indices by epoch and shard. Every list of indices
|
||||
// represent one committee. Returns true if the list exists with epoch and shard. Otherwise returns false, nil.
|
||||
func (c *CommitteeCache) ShuffledIndices(epoch uint64, shard uint64) ([]uint64, error) {
|
||||
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// represents one committee. Returns the list if it exists for the given slot and committee index; otherwise returns nil, nil.
|
||||
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -81,114 +86,48 @@ func (c *CommitteeCache) ShuffledIndices(epoch uint64, shard uint64) ([]uint64,
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
start, end := startEndIndices(item, shard)
|
||||
committeeCountPerSlot := uint64(1)
|
||||
if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
|
||||
committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
|
||||
}
|
||||
|
||||
return item.Committee[start:end], nil
|
||||
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
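// Illustrative arithmetic for the offset above (example numbers only): with 24
// committees per epoch and 8 slots per epoch, committeeCountPerSlot is 24/8 = 3,
// so for a slot with slot%8 == 2 and committee index 1 the offset is 1 + 2*3 = 7,
// i.e. the 8th committee of the epoch-wide shuffled list.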
|
||||
start, end := startEndIndices(item, indexOffSet)
|
||||
|
||||
return item.ShuffledIndices[start:end], nil
|
||||
}
|
||||
|
||||
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache. This
|
||||
// method also trims the least recently added list if the cache size has reached the max cache size limit.
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committee *Committee) error {
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil
|
||||
}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.CommitteeCache.AddIfNotPresent(committee); err != nil {
|
||||
|
||||
if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
trim(c.CommitteeCache, maxShuffledIndicesSize)
|
||||
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Epochs returns the epochs stored in the committee cache. These are the keys to the cache.
|
||||
func (c *CommitteeCache) Epochs() ([]uint64, error) {
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
epochs := make([]uint64, len(c.CommitteeCache.ListKeys()))
|
||||
for i, s := range c.CommitteeCache.ListKeys() {
|
||||
epoch, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochs[i] = uint64(epoch)
|
||||
}
|
||||
return epochs, nil
|
||||
}
|
||||
|
||||
// EpochInCache returns true if an input epoch is part of keys in cache.
|
||||
func (c *CommitteeCache) EpochInCache(wantedEpoch uint64) (bool, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
for _, s := range c.CommitteeCache.ListKeys() {
|
||||
epoch, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if wantedEpoch == uint64(epoch) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// CommitteeCount returns the total number of committees in a given epoch as stored in cache.
|
||||
func (c *CommitteeCache) CommitteeCount(epoch uint64) (uint64, bool, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
if !ok {
|
||||
return 0, false, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.CommitteeCount, true, nil
|
||||
}
|
||||
|
||||
// StartShard returns the start shard number in a given epoch as stored in cache.
|
||||
func (c *CommitteeCache) StartShard(epoch uint64) (uint64, bool, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
if !ok {
|
||||
return 0, false, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.StartShard, true, nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given epoch stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -200,20 +139,25 @@ func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.Committee, nil
|
||||
return item.SortedIndices, nil
|
||||
}
|
||||
|
||||
func startEndIndices(c *Committee, wantedShard uint64) (uint64, uint64) {
|
||||
shardCount := params.BeaconConfig().ShardCount
|
||||
currentShard := (wantedShard + shardCount - c.StartShard) % shardCount
|
||||
validatorCount := uint64(len(c.Committee))
|
||||
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, currentShard)
|
||||
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, currentShard+1)
|
||||
|
||||
func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
|
||||
validatorCount := uint64(len(c.ShuffledIndices))
|
||||
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
|
||||
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
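// Worked example (assuming SplitOffset implements the spec's split formula,
// floor(listSize*index/chunks)): with 6 shuffled indices and CommitteeCount 3,
// committee 0 spans [0:2], committee 1 spans [2:4] and committee 2 spans [4:6].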
|
||||
return start, end
|
||||
}
|
||||
|
||||
// Using seed as source for key to handle reorgs in the same epoch.
|
||||
// The seed is derived from state's array of randao mixes and epoch value
|
||||
// hashed together. This avoids collisions on different validator set. Spec definition:
|
||||
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_seed
|
||||
func key(seed [32]byte) string {
|
||||
return string(seed[:])
|
||||
}
|
||||
|
||||
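Before the test changes below, a short usage sketch of the new seed-keyed API. The seed bytes, index values and helper function name are made up for illustration; NewCommitteesCache, Committees, AddCommitteeShuffledList and Committee are the identifiers introduced above, and the cache only stores entries when the shuffled-index cache feature flag is enabled.

package cache_test

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/shared/params"
)

// committeesCacheSketch runs one add/lookup round trip keyed by a RANDAO seed.
func committeesCacheSketch() error {
    c := cache.NewCommitteesCache()
    seed := [32]byte{'A'} // illustrative; normally derived from the RANDAO mixes and the epoch

    // One epoch's worth of shuffled validator indices, split into 2 committees.
    if err := c.AddCommitteeShuffledList(&cache.Committees{
        Seed:            seed,
        CommitteeCount:  2,
        ShuffledIndices: []uint64{3, 1, 2, 0},
        SortedIndices:   []uint64{0, 1, 2, 3},
    }); err != nil {
        return err
    }

    // Look up committee 1 of the first slot of the epoch, keyed by the same seed.
    slot := params.BeaconConfig().SlotsPerEpoch
    committee, err := c.Committee(slot, seed, 1)
    if err != nil {
        return err
    }
    fmt.Println(committee) // [2 0] with the split above (and the feature flag enabled)
    return nil
}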
226
beacon-chain/cache/committee_test.go
vendored
@@ -2,23 +2,27 @@ package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestCommitteeKeyFn_OK(t *testing.T) {
|
||||
item := &Committee{
|
||||
Epoch: 999,
|
||||
CommitteeCount: 1,
|
||||
Committee: []uint64{1, 2, 3, 4, 5},
|
||||
item := &Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: [32]byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := committeeKeyFn(item)
|
||||
k, err := committeeKeyFn(item)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(item.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(item.Epoch)))
|
||||
if k != key(item.Seed) {
|
||||
t.Errorf("Incorrect hash k: %s, expected %s", k, key(item.Seed))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,18 +34,17 @@ func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committee{
|
||||
Epoch: 1,
|
||||
Committee: []uint64{1, 2, 3, 4, 5, 6},
|
||||
CommitteeCount: 3,
|
||||
StartShard: 1,
|
||||
item := &Committees{
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
|
||||
Seed: [32]byte{'A'},
|
||||
CommitteeCount: 3,
|
||||
}
|
||||
|
||||
epoch := uint64(1)
|
||||
startShard := uint64(1)
|
||||
indices, err := cache.ShuffledIndices(epoch, startShard)
|
||||
slot := params.BeaconConfig().SlotsPerEpoch
|
||||
committeeIndex := uint64(1)
|
||||
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -52,164 +55,27 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedShard := uint64(2)
|
||||
indices, err = cache.ShuffledIndices(epoch, wantedShard)
|
||||
wantedIndex := uint64(0)
|
||||
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
start, end := startEndIndices(item, wantedShard)
|
||||
if !reflect.DeepEqual(indices, item.Committee[start:end]) {
|
||||
|
||||
start, end := startEndIndices(item, wantedIndex)
|
||||
if !reflect.DeepEqual(indices, item.ShuffledIndices[start:end]) {
|
||||
t.Errorf(
|
||||
"Expected fetched active indices to be %v, got %v",
|
||||
indices,
|
||||
item.Committee[start:end],
|
||||
item.ShuffledIndices[start:end],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
item1 := &Committee{Epoch: 1}
|
||||
if err := cache.AddCommitteeShuffledList(item1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
item2 := &Committee{Epoch: 2}
|
||||
if err := cache.AddCommitteeShuffledList(item2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err := cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted := item1.Epoch + item2.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
|
||||
item3 := &Committee{Epoch: 4}
|
||||
if err := cache.AddCommitteeShuffledList(item3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err = cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted = item1.Epoch + item2.Epoch + item3.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
|
||||
item4 := &Committee{Epoch: 6}
|
||||
if err := cache.AddCommitteeShuffledList(item4); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err = cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted = item2.Epoch + item3.Epoch + item4.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_EpochInCache(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 2}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 99}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 100}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
inCache, err := cache.EpochInCache(1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inCache {
|
||||
t.Error("Epoch shouldn't be in cache")
|
||||
}
|
||||
inCache, err = cache.EpochInCache(100)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !inCache {
|
||||
t.Error("Epoch should be in cache")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CommitteesCount(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
|
||||
committeeCount := uint64(3)
|
||||
epoch := uint64(10)
|
||||
item := &Committee{Epoch: epoch, CommitteeCount: committeeCount}
|
||||
|
||||
_, exists, err := cache.CommitteeCount(1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if exists {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
count, exists, err := cache.CommitteeCount(epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !exists {
|
||||
t.Error("Expected committee count to be in cache")
|
||||
}
|
||||
if count != committeeCount {
|
||||
t.Errorf("wanted: %d, got: %d", committeeCount, count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_ShardCount(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
|
||||
startShard := uint64(7)
|
||||
epoch := uint64(3)
|
||||
item := &Committee{Epoch: epoch, StartShard: startShard}
|
||||
|
||||
_, exists, err := cache.StartShard(1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if exists {
|
||||
t.Error("Expected start shard not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
shard, exists, err := cache.StartShard(epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !exists {
|
||||
t.Error("Expected start shard to be in cache")
|
||||
}
|
||||
if shard != startShard {
|
||||
t.Errorf("wanted: %d, got: %d", startShard, shard)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committee{Epoch: 1, Committee: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(1)
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -221,19 +87,41 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
indices, err = cache.ActiveIndices(1)
|
||||
indices, err = cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, item.Committee) {
|
||||
if !reflect.DeepEqual(indices, item.SortedIndices) {
|
||||
t.Error("Did not receive correct active indices from cache")
|
||||
}
|
||||
}
|
||||
|
||||
func sum(values []uint64) uint64 {
|
||||
sum := uint64(0)
|
||||
for _, v := range values {
|
||||
sum = v + sum
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
// Should rotate out all the epochs except 190 through 199.
|
||||
for i := 100; i < 200; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.ListKeys()
|
||||
if len(k) != maxCommitteesCacheSize {
|
||||
t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
|
||||
}
|
||||
|
||||
sort.Slice(k, func(i, j int) bool {
|
||||
return k[i] < k[j]
|
||||
})
|
||||
s := bytesutil.ToBytes32([]byte(strconv.Itoa(190)))
|
||||
if k[0] != key(s) {
|
||||
t.Error("incorrect key received for slot 190")
|
||||
}
|
||||
s = bytesutil.ToBytes32([]byte(strconv.Itoa(199)))
|
||||
if k[len(k)-1] != key(s) {
|
||||
t.Error("incorrect key received for slot 199")
|
||||
}
|
||||
return sum
|
||||
}
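Note: the tests above follow the cache refactor from the epoch-keyed Committee object to the seed-keyed Committees object. As a rough, self-contained sketch of the keying idea (committeeKey below is a hypothetical stand-in for the package's key() helper, not the repository's implementation):

package main

import (
	"encoding/hex"
	"fmt"
)

// committeeKey is a hypothetical stand-in for the cache's key() helper:
// deriving the key from the shuffling seed makes lookups independent of
// the epoch number that the old Committee cache was keyed on.
func committeeKey(seed [32]byte) string {
	return hex.EncodeToString(seed[:])
}

func main() {
	seed := [32]byte{'A'}
	fmt.Println(committeeKey(seed)) // the same seed always maps to the same cache key
}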
|
||||
|
||||
7	beacon-chain/cache/depositcache/BUILD.bazel (vendored)
@@ -9,10 +9,12 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
@@ -26,9 +28,10 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -10,7 +10,9 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
-	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
+	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
+	dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
+	"github.com/prysmaticlabs/prysm/shared/bytesutil"
 	log "github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )
@@ -33,28 +35,19 @@ type DepositFetcher interface {
 // stores all the deposit related data that is required by the beacon-node.
 type DepositCache struct {
 	// Beacon chain deposits in memory.
-	pendingDeposits []*DepositContainer
-	deposits []*DepositContainer
+	pendingDeposits []*dbpb.DepositContainer
+	deposits []*dbpb.DepositContainer
 	depositsLock sync.RWMutex
 	chainStartDeposits []*ethpb.Deposit
 	chainstartPubkeys map[string]bool
 	chainstartPubkeysLock sync.RWMutex
 }

-// DepositContainer object for holding the deposit and a reference to the block in
-// which the deposit transaction was included in the proof of work chain.
-type DepositContainer struct {
-	Deposit *ethpb.Deposit
-	Block *big.Int
-	Index int
-	depositRoot [32]byte
-}
-
 // NewDepositCache instantiates a new deposit cache
 func NewDepositCache() *DepositCache {
 	return &DepositCache{
-		pendingDeposits: []*DepositContainer{},
-		deposits: []*DepositContainer{},
+		pendingDeposits: []*dbpb.DepositContainer{},
+		deposits: []*dbpb.DepositContainer{},
 		chainstartPubkeys: make(map[string]bool),
 		chainStartDeposits: make([]*ethpb.Deposit, 0),
 	}
@@ -62,10 +55,10 @@ func NewDepositCache() *DepositCache {

 // InsertDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
-func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertDeposit")
+func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
+	ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
 	defer span.End()
-	if d == nil || blockNum == nil {
+	if d == nil {
 		log.WithFields(log.Fields{
 			"block":   blockNum,
 			"deposit": d,
@@ -78,14 +71,36 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
 	defer dc.depositsLock.Unlock()
 	// keep the slice sorted on insertion in order to avoid costly sorting on retrival.
 	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
-	newDeposits := append([]*DepositContainer{{Deposit: d, Block: blockNum, depositRoot: depositRoot, Index: index}}, dc.deposits[heightIdx:]...)
+	newDeposits := append([]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}}, dc.deposits[heightIdx:]...)
 	dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
 	historicalDepositsCount.Inc()
 }

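Note: InsertDeposit now takes a plain uint64 block height and an int64 index instead of *big.Int values, and it keeps dc.deposits ordered by Index via sort.Search so reads never need to re-sort. A minimal sketch of that sorted-insert pattern under those assumptions (container below is a reduced stand-in for dbpb.DepositContainer, not the real type):

package main

import (
	"fmt"
	"sort"
)

// container is a stand-in for dbpb.DepositContainer, reduced to the one
// field the sorted insert cares about.
type container struct {
	Index int64
}

// insertSorted keeps the slice ordered by Index, as InsertDeposit does,
// so later lookups can rely on the ordering without sorting again.
func insertSorted(deposits []*container, c *container) []*container {
	i := sort.Search(len(deposits), func(i int) bool { return deposits[i].Index >= c.Index })
	deposits = append(deposits, nil)
	copy(deposits[i+1:], deposits[i:])
	deposits[i] = c
	return deposits
}

func main() {
	var ds []*container
	for _, idx := range []int64{0, 3, 1, 4} {
		ds = insertSorted(ds, &container{Index: idx})
	}
	for _, d := range ds {
		fmt.Println(d.Index) // prints 0 1 3 4, matching the sorted-order test above
	}
}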
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbpb.DepositContainer) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
|
||||
dc.deposits = ctrs
|
||||
historicalDepositsCount.Add(float64(len(ctrs)))
|
||||
}
|
||||
|
||||
// AllDepositContainers returns a list of deposits all historical deposit containers until the given block number.
|
||||
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
return dc.deposits
|
||||
}
|
||||
|
||||
// MarkPubkeyForChainstart sets the pubkey deposit status to true.
|
||||
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.MarkPubkeyForChainstart")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
@@ -94,7 +109,7 @@ func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey stri
|
||||
|
||||
// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
|
||||
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.PubkeyInChainstart")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
@@ -108,14 +123,14 @@ func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) b
|
||||
// AllDeposits returns a list of deposits all historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, ctnr := range dc.deposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
deposits = append(deposits, ctnr.Deposit)
|
||||
}
|
||||
}
|
||||
@@ -125,23 +140,23 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made prior to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "Beacondb.DepositsNumberAndRootAtHeight")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Block.Cmp(blockHeight) > 0 })
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
|
||||
// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
|
||||
// deposit.
|
||||
if heightIdx == 0 {
|
||||
return 0, [32]byte{}
|
||||
}
|
||||
return uint64(heightIdx), dc.deposits[heightIdx-1].depositRoot
|
||||
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
|
||||
}
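Note: because the deposit slice stays sorted on insertion, DepositsNumberAndRootAtHeight can use sort.Search to count deposits at or below a given block height. A small sketch of that cut-off logic over a plain slice of heights (an illustrative stand-in, not the cache's container type):

package main

import (
	"fmt"
	"sort"
)

// countAtHeight returns how many deposits were included at or below
// blockHeight, assuming heights is sorted ascending (as dc.deposits is).
func countAtHeight(heights []uint64, blockHeight uint64) int {
	// index of the first deposit strictly above blockHeight
	return sort.Search(len(heights), func(i int) bool { return heights[i] > blockHeight })
}

func main() {
	heights := []uint64{10, 10, 10, 10, 11, 12, 12}
	fmt.Println(countAtHeight(heights, 11)) // 5 deposits at or below height 11
}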
|
||||
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositByPubkey")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -151,7 +166,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
|
||||
for _, ctnr := range dc.deposits {
|
||||
if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
|
||||
deposit = ctnr.Deposit
|
||||
blockNum = ctnr.Block
|
||||
blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
149	beacon-chain/cache/depositcache/deposits_test.go (vendored)
@@ -6,7 +6,8 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
@@ -19,21 +20,7 @@ func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), nil, big.NewInt(1), 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
}
|
||||
if hook.LastEntry().Message != nilDepositErr {
|
||||
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_InsertDeposit_LogsOnNilBlockNumberInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), ðpb.Deposit{}, nil, 0, [32]byte{})
|
||||
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
@@ -47,27 +34,27 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
insertions := []struct {
|
||||
blkNum *big.Int
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 3,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 4,
|
||||
},
|
||||
@@ -77,7 +64,7 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
expectedIndices := []int{0, 1, 3, 4}
|
||||
expectedIndices := []int64{0, 1, 3, 4}
|
||||
for i, ei := range expectedIndices {
|
||||
if dc.deposits[i].Index != ei {
|
||||
t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
|
||||
@@ -88,34 +75,34 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*DepositContainer{
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
@@ -129,34 +116,34 @@ func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*DepositContainer{
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
@@ -171,35 +158,35 @@ func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testi
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -216,16 +203,16 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -242,9 +229,9 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLes
|
||||
func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(9),
|
||||
Eth1BlockHeight: 9,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk0"),
|
||||
@@ -252,7 +239,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
@@ -260,7 +247,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
@@ -268,7 +255,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk2"),
|
||||
|
||||
@@ -7,7 +7,8 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -23,15 +24,15 @@ var (
|
||||
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
|
||||
// which have not yet been included in the chain.
|
||||
type PendingDepositsFetcher interface {
|
||||
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer
|
||||
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertPendingDeposit")
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil || blockNum == nil {
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
@@ -40,7 +41,8 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
dc.pendingDeposits = append(dc.pendingDeposits, &DepositContainer{Deposit: d, Block: blockNum, Index: index, depositRoot: depositRoot})
|
||||
dc.pendingDeposits = append(dc.pendingDeposits,
|
||||
&dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Inc()
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
|
||||
}
|
||||
@@ -54,9 +56,9 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*DepositContainer
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
@@ -77,15 +79,15 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
|
||||
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer {
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*DepositContainer
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
@@ -151,9 +153,9 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
var cleanDeposits []*DepositContainer
|
||||
var cleanDeposits []*dbpb.DepositContainer
|
||||
for _, dp := range dc.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
if dp.Index >= int64(merkleTreeIndex) {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,14 +7,15 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
)
|
||||
|
||||
var _ = PendingDepositsFetcher(&DepositCache{})
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, big.NewInt(111), 100, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Error("Deposit not inserted")
|
||||
@@ -23,7 +24,7 @@ func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
|
||||
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, nil /*blockNum*/, 0, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) > 0 {
|
||||
t.Error("Unexpected deposit insertion")
|
||||
@@ -34,7 +35,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
depToRemove := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
otherDep := ðpb.Deposit{Proof: [][]byte{[]byte("B")}}
|
||||
db.pendingDeposits = []*DepositContainer{
|
||||
db.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Deposit: depToRemove, Index: 1},
|
||||
{Deposit: otherDep, Index: 5},
|
||||
}
|
||||
@@ -47,7 +48,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
|
||||
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.pendingDeposits = []*DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Errorf("Deposit unexpectedly removed")
|
||||
@@ -57,7 +58,7 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dep := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
dc.InsertPendingDeposit(context.Background(), dep, big.NewInt(111), 100, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
|
||||
dc.RemovePendingDeposit(context.Background(), dep)
|
||||
if len(dc.pendingDeposits) != 0 {
|
||||
t.Error("Failed to insert & delete a pending deposit")
|
||||
@@ -67,10 +68,10 @@ func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
func TestPendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Block: big.NewInt(4), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Block: big.NewInt(6), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Eth1BlockHeight: 4, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
}
|
||||
|
||||
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
|
||||
@@ -92,25 +93,24 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
@@ -119,40 +119,40 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*DepositContainer{
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*DepositContainer{
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
|
||||
7	beacon-chain/cache/feature_flag_test.go (vendored)
@@ -3,8 +3,9 @@ package cache
|
||||
import "github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
|
||||
func init() {
|
||||
featureconfig.Init(&featureconfig.Flag{
|
||||
EnableAttestationCache: true,
|
||||
EnableEth1DataVoteCache: true,
|
||||
featureconfig.Init(&featureconfig.Flags{
|
||||
EnableAttestationCache: true,
|
||||
EnableEth1DataVoteCache: true,
|
||||
EnableShuffledIndexCache: true,
|
||||
})
|
||||
}
|
||||
|
||||
99	beacon-chain/cache/shuffled_indices.go (vendored)
@@ -1,99 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotValidatorListInfo will be returned when a cache object is not a pointer to
|
||||
// a ValidatorList struct.
|
||||
ErrNotValidatorListInfo = errors.New("object is not a shuffled validator list")
|
||||
|
||||
// maxShuffledListSize defines the max number of shuffled list can cache.
|
||||
maxShuffledListSize = 1000
|
||||
|
||||
// Metrics.
|
||||
shuffledIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "shuffled_validators_cache_miss",
|
||||
Help: "The number of shuffled validators requests that aren't present in the cache.",
|
||||
})
|
||||
shuffledIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "shuffled_validators_cache_hit",
|
||||
Help: "The number of shuffled validators requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// IndicesByIndexSeed defines the shuffled validator indices per randao seed.
|
||||
type IndicesByIndexSeed struct {
|
||||
Index uint64
|
||||
Seed []byte
|
||||
ShuffledIndices []uint64
|
||||
}
|
||||
|
||||
// ShuffledIndicesCache is a struct with 1 queue for looking up shuffled validators by seed.
|
||||
type ShuffledIndicesCache struct {
|
||||
shuffledIndicesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// slotKeyFn takes the randao seed as the key for the shuffled validators of a given epoch.
|
||||
func shuffleKeyFn(obj interface{}) (string, error) {
|
||||
sInfo, ok := obj.(*IndicesByIndexSeed)
|
||||
if !ok {
|
||||
return "", ErrNotValidatorListInfo
|
||||
}
|
||||
|
||||
return string(sInfo.Seed) + strconv.Itoa(int(sInfo.Index)), nil
|
||||
}
|
||||
|
||||
// NewShuffledIndicesCache creates a new shuffled validators cache for storing/accessing shuffled validator indices
|
||||
func NewShuffledIndicesCache() *ShuffledIndicesCache {
|
||||
return &ShuffledIndicesCache{
|
||||
shuffledIndicesCache: cache.NewFIFO(shuffleKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// IndicesByIndexSeed fetches IndicesByIndexSeed by epoch and seed. Returns true with a
|
||||
// reference to the ShuffledIndicesInEpoch info, if exists. Otherwise returns false, nil.
|
||||
func (c *ShuffledIndicesCache) IndicesByIndexSeed(index uint64, seed []byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
key := string(seed) + strconv.Itoa(int(index))
|
||||
obj, exists, err := c.shuffledIndicesCache.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
shuffledIndicesCacheHit.Inc()
|
||||
} else {
|
||||
shuffledIndicesCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cInfo, ok := obj.(*IndicesByIndexSeed)
|
||||
if !ok {
|
||||
return nil, ErrNotValidatorListInfo
|
||||
}
|
||||
|
||||
return cInfo.ShuffledIndices, nil
|
||||
}
|
||||
|
||||
// AddShuffledValidatorList adds IndicesByIndexSeed object to the cache. This method also trims the least
|
||||
// recently added IndicesByIndexSeed object if the cache size has ready the max cache size limit.
|
||||
func (c *ShuffledIndicesCache) AddShuffledValidatorList(shuffledIndices *IndicesByIndexSeed) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.shuffledIndicesCache.AddIfNotPresent(shuffledIndices); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.shuffledIndicesCache, maxShuffledListSize)
|
||||
return nil
|
||||
}
|
||||
85	beacon-chain/cache/shuffled_indices_test.go (vendored)
@@ -1,85 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestShuffleKeyFn_OK(t *testing.T) {
|
||||
sInfo := &IndicesByIndexSeed{
|
||||
Index: 999,
|
||||
Seed: []byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := shuffleKeyFn(sInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != string(sInfo.Seed)+strconv.Itoa(int(sInfo.Index)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, string(sInfo.Seed)+strconv.Itoa(int(sInfo.Index)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestShuffleKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := shuffleKeyFn("bad")
|
||||
if err != ErrNotValidatorListInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotValidatorListInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShuffledIndicesCache_ShuffledIndicesBySeed2(t *testing.T) {
|
||||
cache := NewShuffledIndicesCache()
|
||||
|
||||
sInfo := &IndicesByIndexSeed{
|
||||
Index: 99,
|
||||
Seed: []byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
shuffledIndices, err := cache.IndicesByIndexSeed(sInfo.Index, sInfo.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if shuffledIndices != nil {
|
||||
t.Error("Expected shuffled indices not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddShuffledValidatorList(sInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shuffledIndices, err = cache.IndicesByIndexSeed(sInfo.Index, sInfo.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(shuffledIndices, sInfo.ShuffledIndices) {
|
||||
t.Errorf(
|
||||
"Expected fetched info committee to be %v, got %v",
|
||||
sInfo.ShuffledIndices,
|
||||
shuffledIndices,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShuffledIndices_MaxSize(t *testing.T) {
|
||||
cache := NewShuffledIndicesCache()
|
||||
|
||||
for i := uint64(0); i < 1001; i++ {
|
||||
sInfo := &IndicesByIndexSeed{
|
||||
Index: i,
|
||||
Seed: []byte{byte(i)},
|
||||
}
|
||||
if err := cache.AddShuffledValidatorList(sInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.shuffledIndicesCache.ListKeys()) != maxShuffledListSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxShuffledListSize,
|
||||
len(cache.shuffledIndicesCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -17,16 +17,17 @@ go_library(
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -37,6 +38,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_operations_test.go",
|
||||
"block_test.go",
|
||||
"eth1_data_test.go",
|
||||
@@ -44,18 +46,15 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_phoreproject_bls//:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -4,18 +4,20 @@
 package blocks

 import (
-	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
+	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
 	"github.com/prysmaticlabs/prysm/shared/params"
 )

 // NewGenesisBlock returns the canonical, genesis block for the beacon chain protocol.
-func NewGenesisBlock(stateRoot []byte) *ethpb.BeaconBlock {
+func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
 	zeroHash := params.BeaconConfig().ZeroHash[:]
 	genBlock := &ethpb.BeaconBlock{
 		ParentRoot: zeroHash,
 		StateRoot: stateRoot,
 		Body: &ethpb.BeaconBlockBody{},
-		Signature: params.BeaconConfig().EmptySignature[:],
 	}
-	return genBlock
+	return &ethpb.SignedBeaconBlock{
+		Block: genBlock,
+		Signature: params.BeaconConfig().EmptySignature[:],
+	}
 }
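Note: NewGenesisBlock now returns a SignedBeaconBlock wrapper, moving the signature off the block and onto the envelope. A minimal sketch of that wrapping pattern with local stand-in types (the real types live in github.com/prysmaticlabs/ethereumapis/eth/v1alpha1):

package main

import "fmt"

// BeaconBlock and SignedBeaconBlock are local stand-ins for the protobuf
// types; only the fields needed for the illustration are included.
type BeaconBlock struct {
	ParentRoot []byte
	StateRoot  []byte
}

type SignedBeaconBlock struct {
	Block     *BeaconBlock
	Signature []byte
}

// newGenesisBlock mirrors the shape of the refactor: the signature now
// lives on the signed wrapper rather than on the block itself.
func newGenesisBlock(stateRoot []byte) *SignedBeaconBlock {
	return &SignedBeaconBlock{
		Block:     &BeaconBlock{ParentRoot: make([]byte, 32), StateRoot: stateRoot},
		Signature: make([]byte, 96), // empty 96-byte BLS signature
	}
}

func main() {
	b := newGenesisBlock(make([]byte, 32))
	fmt.Println(len(b.Signature)) // 96
}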
|
||||
@@ -4,23 +4,24 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state/stateutils"
|
||||
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
@@ -32,7 +33,31 @@ var log = logrus.WithField("prefix", "blocks")
|
||||
|
||||
var eth1DataCache = cache.NewEth1DataVoteCache()
|
||||
|
||||
// ErrSigFailedToVerify returns when a signature of a block object(ie attestation, slashing, exit... etc)
|
||||
// failed to verify.
|
||||
var ErrSigFailedToVerify = errors.New("signature did not verify")
|
||||
|
||||
func verifySigningRoot(obj interface{}, pub []byte, signature []byte, domain uint64) error {
|
||||
publicKey, err := bls.PublicKeyFromBytes(pub)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(signature)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(obj)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root")
|
||||
}
|
||||
if !sig.Verify(root[:], publicKey, domain) {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
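Note: exporting ErrSigFailedToVerify as a sentinel lets callers distinguish an invalid signature from other processing errors. A small, hypothetical sketch of how a caller might branch on it (processHeader below is illustrative, not the repository's code):

package main

import (
	"errors"
	"fmt"
)

// ErrSigFailedToVerify mirrors the sentinel defined in the diff above so a
// caller can tell a bad signature apart from other failures.
var ErrSigFailedToVerify = errors.New("signature did not verify")

// processHeader is a hypothetical caller; in the real code the check
// happens inside ProcessBlockHeader via verifySigningRoot.
func processHeader() error {
	return ErrSigFailedToVerify
}

func main() {
	if err := processHeader(); errors.Is(err, ErrSigFailedToVerify) {
		fmt.Println("reject block: proposer signature did not verify")
	}
}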
|
||||
|
||||
// Deprecated: This method uses deprecated ssz.SigningRoot.
|
||||
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain uint64) error {
|
||||
publicKey, err := bls.PublicKeyFromBytes(pub)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to public key")
|
||||
@@ -46,7 +71,7 @@ func verifySigningRoot(obj interface{}, pub []byte, signature []byte, domain uin
|
||||
return errors.Wrap(err, "could not get signing root")
|
||||
}
|
||||
if !sig.Verify(root[:], publicKey, domain) {
|
||||
return fmt.Errorf("signature did not verify")
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -61,7 +86,7 @@ func verifySignature(signedData []byte, pub []byte, signature []byte, domain uin
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
if !sig.Verify(signedData, publicKey, domain) {
|
||||
return fmt.Errorf("signature did not verify")
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -157,11 +182,11 @@ func Eth1DataHasEnoughSupport(beaconState *pb.BeaconState, data *ethpb.Eth1Data)
|
||||
// assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
|
||||
func ProcessBlockHeader(
|
||||
beaconState *pb.BeaconState,
|
||||
block *ethpb.BeaconBlock,
|
||||
block *ethpb.SignedBeaconBlock,
|
||||
) (*pb.BeaconState, error) {
|
||||
beaconState, err := ProcessBlockHeaderNoVerify(beaconState, block)
|
||||
beaconState, err := ProcessBlockHeaderNoVerify(beaconState, block.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block header")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idx, err := helpers.BeaconProposerIndex(beaconState)
|
||||
@@ -169,15 +194,12 @@ func ProcessBlockHeader(
|
||||
return nil, err
|
||||
}
|
||||
proposer := beaconState.Validators[idx]
|
||||
if proposer.Slashed {
|
||||
return nil, fmt.Errorf("proposer at index %d was previously slashed", idx)
|
||||
}
|
||||
|
||||
// Verify proposer signature.
|
||||
currentEpoch := helpers.CurrentEpoch(beaconState)
|
||||
domain := helpers.Domain(beaconState.Fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
if err := verifySigningRoot(block, proposer.PublicKey, block.Signature, domain); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify block signature")
|
||||
if err := verifySigningRoot(block.Block, proposer.PublicKey, block.Signature, domain); err != nil {
|
||||
return nil, ErrSigFailedToVerify
|
||||
}
|
||||
|
||||
return beaconState, nil
|
||||
@@ -210,11 +232,14 @@ func ProcessBlockHeaderNoVerify(
|
||||
beaconState *pb.BeaconState,
|
||||
block *ethpb.BeaconBlock,
|
||||
) (*pb.BeaconState, error) {
|
||||
if block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
if beaconState.Slot != block.Slot {
|
||||
return nil, fmt.Errorf("state slot: %d is different then block slot: %d", beaconState.Slot, block.Slot)
|
||||
}
|
||||
|
||||
parentRoot, err := ssz.SigningRoot(beaconState.LatestBlockHeader)
|
||||
parentRoot, err := ssz.HashTreeRoot(beaconState.LatestBlockHeader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -224,17 +249,24 @@ func ProcessBlockHeaderNoVerify(
|
||||
block.ParentRoot, parentRoot)
|
||||
}
|
||||
|
||||
idx, err := helpers.BeaconProposerIndex(beaconState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer := beaconState.Validators[idx]
|
||||
if proposer.Slashed {
|
||||
return nil, fmt.Errorf("proposer at index %d was previously slashed", idx)
|
||||
}
|
||||
|
||||
bodyRoot, err := ssz.HashTreeRoot(block.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
emptySig := make([]byte, 96)
|
||||
beaconState.LatestBlockHeader = ðpb.BeaconBlockHeader{
|
||||
Slot: block.Slot,
|
||||
ParentRoot: block.ParentRoot,
|
||||
StateRoot: params.BeaconConfig().ZeroHash[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
Signature: emptySig,
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
@@ -320,8 +352,8 @@ func ProcessRandaoNoVerify(
|
||||
// Process ``ProposerSlashing`` operation.
|
||||
// """
|
||||
// proposer = state.validator_registry[proposer_slashing.proposer_index]
|
||||
// # Verify that the epoch is the same
|
||||
// assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot)
|
||||
// # Verify slots match
|
||||
// assert proposer_slashing.header_1.slot == proposer_slashing.header_2.slot
|
||||
// # But the headers are different
|
||||
// assert proposer_slashing.header_1 != proposer_slashing.header_2
|
||||
// # Check proposer is slashable
|
||||
@@ -356,12 +388,10 @@ func VerifyProposerSlashing(
|
||||
beaconState *pb.BeaconState,
|
||||
slashing *ethpb.ProposerSlashing,
|
||||
) error {
|
||||
headerEpoch1 := helpers.SlotToEpoch(slashing.Header_1.Slot)
|
||||
headerEpoch2 := helpers.SlotToEpoch(slashing.Header_2.Slot)
|
||||
proposer := beaconState.Validators[slashing.ProposerIndex]
|
||||
|
||||
if headerEpoch1 != headerEpoch2 {
|
||||
return fmt.Errorf("mismatched header epochs, received %d == %d", headerEpoch1, headerEpoch2)
|
||||
if slashing.Header_1.Header.Slot != slashing.Header_2.Header.Slot {
|
||||
return fmt.Errorf("mismatched header slots, received %d == %d", slashing.Header_1.Header.Slot, slashing.Header_2.Header.Slot)
|
||||
}
|
||||
if proto.Equal(slashing.Header_1, slashing.Header_2) {
|
||||
return errors.New("expected slashing headers to differ")
|
||||
@@ -370,10 +400,10 @@ func VerifyProposerSlashing(
|
||||
return fmt.Errorf("validator with key %#x is not slashable", proposer.PublicKey)
|
||||
}
|
||||
// Using headerEpoch1 here because both of the headers should have the same epoch.
|
||||
domain := helpers.Domain(beaconState.Fork, headerEpoch1, params.BeaconConfig().DomainBeaconProposer)
|
||||
headers := append([]*ethpb.BeaconBlockHeader{slashing.Header_1}, slashing.Header_2)
|
||||
domain := helpers.Domain(beaconState.Fork, helpers.StartSlot(slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer)
|
||||
headers := []*ethpb.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2}
|
||||
for _, header := range headers {
|
||||
if err := verifySigningRoot(header, proposer.PublicKey, header.Signature, domain); err != nil {
|
||||
if err := verifySigningRoot(header.Header, proposer.PublicKey, header.Signature, domain); err != nil {
|
||||
return errors.Wrap(err, "could not verify beacon block header")
|
||||
}
|
||||
}
|
||||
@@ -386,19 +416,15 @@ func VerifyProposerSlashing(
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
|
||||
// """
|
||||
// Process ``AttesterSlashing`` operation.
|
||||
// """
|
||||
// attestation_1 = attester_slashing.attestation_1
|
||||
// attestation_2 = attester_slashing.attestation_2
|
||||
// assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
|
||||
// validate_indexed_attestation(state, attestation_1)
|
||||
// validate_indexed_attestation(state, attestation_2)
|
||||
// assert is_valid_indexed_attestation(state, attestation_1)
|
||||
// assert is_valid_indexed_attestation(state, attestation_2)
|
||||
//
|
||||
// slashed_any = False
|
||||
// attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
|
||||
// attesting_indices_2 = attestation_2.custody_bit_0_indices + attestation_2.custody_bit_1_indices
|
||||
// for index in sorted(set(attesting_indices_1).intersection(attesting_indices_2)):
|
||||
// indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
|
||||
// for index in sorted(indices):
|
||||
// if is_slashable_validator(state.validators[index], get_current_epoch(state)):
|
||||
// slash_validator(state, index)
|
||||
// slashed_any = True
|
||||
@@ -470,10 +496,8 @@ func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.Attes
|
||||
}
|
||||
|
||||
func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
|
||||
att1 := slashing.Attestation_1
|
||||
att2 := slashing.Attestation_1
|
||||
indices1 := append(att1.CustodyBit_0Indices, att1.CustodyBit_1Indices...)
|
||||
indices2 := append(att2.CustodyBit_0Indices, att2.CustodyBit_1Indices...)
|
||||
indices1 := slashing.Attestation_1.AttestingIndices
|
||||
indices2 := slashing.Attestation_1.AttestingIndices
|
||||
return sliceutil.IntersectionUint64(indices1, indices2)
|
||||
}
|
||||
|
||||
@@ -508,40 +532,28 @@ func ProcessAttestationsNoVerify(ctx context.Context, beaconState *pb.BeaconStat
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
// """
|
||||
// Process ``Attestation`` operation.
|
||||
// """
|
||||
// data = attestation.data
|
||||
// assert data.crosslink.shard < SHARD_COUNT
|
||||
// assert data.index < get_committee_count_at_slot(state, data.slot)
|
||||
// assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
// assert data.target.epoch == compute_epoch_at_slot(data.slot)
|
||||
// assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
|
||||
//
|
||||
// attestation_slot = get_attestation_data_slot(state, data)
|
||||
// assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH
|
||||
//
|
||||
// committee = get_crosslink_committee(state, data.target.epoch, data.crosslink.shard)
|
||||
// assert len(attestation.aggregation_bits) == len(attestation.custody_bits) == len(committee)
|
||||
// committee = get_beacon_committee(state, data.slot, data.index)
|
||||
// assert len(attestation.aggregation_bits) == len(committee)
|
||||
//
|
||||
// pending_attestation = PendingAttestation(
|
||||
// data=data,
|
||||
// aggregation_bitfield=attestation.aggregation_bitfield,
|
||||
// inclusion_delay=state.slot - attestation_slot,
|
||||
// aggregation_bits=attestation.aggregation_bits,
|
||||
// inclusion_delay=state.slot - data.slot,
|
||||
// proposer_index=get_beacon_proposer_index(state),
|
||||
// )
|
||||
//
|
||||
// if data.target_epoch == get_current_epoch(state):
|
||||
// assert data.source == state.current_justified_checkpoint
|
||||
// parent_crosslink = state.current_crosslinks[data.crosslink.shard]
|
||||
// state.current_epoch_attestations.append(pending_attestation)
|
||||
// if data.target.epoch == get_current_epoch(state):
|
||||
// assert data.source == state.current_justified_checkpoint
|
||||
// state.current_epoch_attestations.append(pending_attestation)
|
||||
// else:
|
||||
// assert data.source == state.previous_justified_checkpoint
|
||||
// parent_crosslink = state.previous_crosslinks[data.crosslink.shard]
|
||||
// state.previous_epoch_attestations.append(pending_attestation)
|
||||
//
|
||||
// # Check crosslink against expected parent crosslink
|
||||
// assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
|
||||
// assert data.crosslink.start_epoch == parent_crosslink.end_epoch
|
||||
// assert data.crosslink.end_epoch == min(data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
|
||||
// assert data.crosslink.data_root == Bytes32() # [to be removed in phase 1]
|
||||
// assert data.source == state.previous_justified_checkpoint
|
||||
// state.previous_epoch_attestations.append(pending_attestation)
|
||||
//
|
||||
// # Check signature
|
||||
// assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
||||
@@ -559,16 +571,11 @@ func ProcessAttestationNoVerify(ctx context.Context, beaconState *pb.BeaconState
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerify")
|
||||
defer span.End()
|
||||
|
||||
data := att.Data
|
||||
|
||||
if data.Crosslink.Shard > params.BeaconConfig().ShardCount {
|
||||
return nil, fmt.Errorf(
|
||||
"expected crosslink shard %d to be less than SHARD_COUNT %d",
|
||||
data.Crosslink.Shard,
|
||||
params.BeaconConfig().ShardCount,
|
||||
)
|
||||
if att == nil || att.Data == nil || att.Data.Target == nil {
|
||||
return nil, errors.New("nil attestation data target")
|
||||
}
|
||||
|
||||
data := att.Data
|
||||
if data.Target.Epoch != helpers.PrevEpoch(beaconState) && data.Target.Epoch != helpers.CurrentEpoch(beaconState) {
|
||||
return nil, fmt.Errorf(
|
||||
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
|
||||
@@ -577,17 +584,17 @@ func ProcessAttestationNoVerify(ctx context.Context, beaconState *pb.BeaconState
|
||||
helpers.CurrentEpoch(beaconState),
|
||||
)
|
||||
}
|
||||
|
||||
attestationSlot, err := helpers.AttestationDataSlot(beaconState, data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation slot")
|
||||
if helpers.SlotToEpoch(data.Slot) != data.Target.Epoch {
|
||||
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(data.Slot), data.Target.Epoch)
|
||||
}
|
||||
minInclusionCheck := attestationSlot+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot
|
||||
epochInclusionCheck := beaconState.Slot <= attestationSlot+params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
s := att.Data.Slot
|
||||
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot
|
||||
epochInclusionCheck := beaconState.Slot <= s+params.BeaconConfig().SlotsPerEpoch
|
||||
if !minInclusionCheck {
|
||||
return nil, fmt.Errorf(
|
||||
"attestation slot %d + inclusion delay %d > state slot %d",
|
||||
attestationSlot,
|
||||
s,
|
||||
params.BeaconConfig().MinAttestationInclusionDelay,
|
||||
beaconState.Slot,
|
||||
)
|
||||
@@ -596,7 +603,7 @@ func ProcessAttestationNoVerify(ctx context.Context, beaconState *pb.BeaconState
|
||||
return nil, fmt.Errorf(
|
||||
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
|
||||
beaconState.Slot,
|
||||
attestationSlot,
|
||||
s,
|
||||
params.BeaconConfig().SlotsPerEpoch,
|
||||
)
|
||||
}
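The two conditions in this hunk enforce a single inclusion window on the attestation slot. As a compact restatement, with the configuration constants passed as parameters rather than read from params.BeaconConfig():

package sketch

import "fmt"

// checkInclusionWindow enforces
//   attSlot + minInclusionDelay <= stateSlot <= attSlot + slotsPerEpoch,
// the same window that minInclusionCheck and epochInclusionCheck split apart.
func checkInclusionWindow(attSlot, stateSlot, minInclusionDelay, slotsPerEpoch uint64) error {
	if attSlot+minInclusionDelay > stateSlot {
		return fmt.Errorf("attestation slot %d + inclusion delay %d > state slot %d",
			attSlot, minInclusionDelay, stateSlot)
	}
	if stateSlot > attSlot+slotsPerEpoch {
		return fmt.Errorf("state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
			stateSlot, attSlot, slotsPerEpoch)
	}
	return nil
}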
|
||||
@@ -612,34 +619,22 @@ func ProcessAttestationNoVerify(ctx context.Context, beaconState *pb.BeaconState
|
||||
pendingAtt := &pb.PendingAttestation{
|
||||
Data: data,
|
||||
AggregationBits: att.AggregationBits,
|
||||
InclusionDelay: beaconState.Slot - attestationSlot,
|
||||
InclusionDelay: beaconState.Slot - s,
|
||||
ProposerIndex: proposerIndex,
|
||||
}
|
||||
|
||||
var ffgSourceEpoch uint64
|
||||
var ffgSourceRoot []byte
|
||||
var ffgTargetEpoch uint64
|
||||
var parentCrosslink *ethpb.Crosslink
|
||||
if data.Target.Epoch == helpers.CurrentEpoch(beaconState) {
|
||||
ffgSourceEpoch = beaconState.CurrentJustifiedCheckpoint.Epoch
|
||||
ffgSourceRoot = beaconState.CurrentJustifiedCheckpoint.Root
|
||||
ffgTargetEpoch = helpers.CurrentEpoch(beaconState)
|
||||
crosslinkShard := data.Crosslink.Shard
|
||||
if int(crosslinkShard) >= len(beaconState.CurrentCrosslinks) {
|
||||
return nil, fmt.Errorf("invalid shard given in attestation: %d", crosslinkShard)
|
||||
}
|
||||
|
||||
parentCrosslink = beaconState.CurrentCrosslinks[crosslinkShard]
|
||||
beaconState.CurrentEpochAttestations = append(beaconState.CurrentEpochAttestations, pendingAtt)
|
||||
} else {
|
||||
ffgSourceEpoch = beaconState.PreviousJustifiedCheckpoint.Epoch
|
||||
ffgSourceRoot = beaconState.PreviousJustifiedCheckpoint.Root
|
||||
ffgTargetEpoch = helpers.PrevEpoch(beaconState)
|
||||
crosslinkShard := data.Crosslink.Shard
|
||||
if int(crosslinkShard) >= len(beaconState.PreviousCrosslinks) {
|
||||
return nil, fmt.Errorf("invalid shard given in attestation: %d", crosslinkShard)
|
||||
}
|
||||
parentCrosslink = beaconState.PreviousCrosslinks[crosslinkShard]
|
||||
beaconState.PreviousEpochAttestations = append(beaconState.PreviousEpochAttestations, pendingAtt)
|
||||
}
|
||||
if data.Source.Epoch != ffgSourceEpoch {
|
||||
@@ -651,92 +646,44 @@ func ProcessAttestationNoVerify(ctx context.Context, beaconState *pb.BeaconState
|
||||
if data.Target.Epoch != ffgTargetEpoch {
|
||||
return nil, fmt.Errorf("expected target epoch %d, received %d", ffgTargetEpoch, data.Target.Epoch)
|
||||
}
|
||||
endEpoch := parentCrosslink.EndEpoch + params.BeaconConfig().MaxEpochsPerCrosslink
|
||||
if data.Target.Epoch < endEpoch {
|
||||
endEpoch = data.Target.Epoch
|
||||
}
|
||||
if data.Crosslink.StartEpoch != parentCrosslink.EndEpoch {
|
||||
return nil, fmt.Errorf("expected crosslink start epoch %d, received %d",
|
||||
parentCrosslink.EndEpoch, data.Crosslink.StartEpoch)
|
||||
}
|
||||
if data.Crosslink.EndEpoch != endEpoch {
|
||||
return nil, fmt.Errorf("expected crosslink end epoch %d, received %d",
|
||||
endEpoch, data.Crosslink.EndEpoch)
|
||||
}
|
||||
crosslinkParentRoot, err := ssz.HashTreeRoot(parentCrosslink)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not tree hash parent crosslink")
|
||||
}
|
||||
if !bytes.Equal(data.Crosslink.ParentRoot, crosslinkParentRoot[:]) {
|
||||
return nil, fmt.Errorf(
|
||||
"mismatched parent crosslink root, expected %#x, received %#x",
|
||||
crosslinkParentRoot,
|
||||
data.Crosslink.ParentRoot,
|
||||
)
|
||||
}
|
||||
|
||||
// To be removed in Phase 1
|
||||
if !bytes.Equal(data.Crosslink.DataRoot, params.BeaconConfig().ZeroHash[:]) {
|
||||
return nil, fmt.Errorf("expected data root %#x == ZERO_HASH", data.Crosslink.DataRoot)
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ConvertToIndexed converts attestation to (almost) indexed-verifiable form.
|
||||
//
|
||||
// Note about spec pseudocode definition. The state was used by get_attesting_indices to determine
|
||||
// the attestation committee. Now that we provide this as an argument, we no longer need to provide
|
||||
// a state.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_indexed_attestation(state: BeaconState, attestation: Attestation) -> IndexedAttestation:
|
||||
// """
|
||||
// Return the indexed attestation corresponding to ``attestation``.
|
||||
// """
|
||||
// attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
|
||||
// custody_bit_1_indices = get_attesting_indices(state, attestation.data, attestation.custody_bits)
|
||||
// assert custody_bit_1_indices.issubset(attesting_indices)
|
||||
// custody_bit_0_indices = attesting_indices.difference(custody_bit_1_indices)
|
||||
//
|
||||
// return IndexedAttestation(
|
||||
// custody_bit_0_indices=sorted(custody_bit_0_indices),
|
||||
// custody_bit_1_indices=sorted(custody_bit_1_indices),
|
||||
// attesting_indices=sorted(attesting_indices),
|
||||
// data=attestation.data,
|
||||
// signature=attestation.signature,
|
||||
// )
|
||||
func ConvertToIndexed(ctx context.Context, state *pb.BeaconState, attestation *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
func ConvertToIndexed(ctx context.Context, attestation *ethpb.Attestation, committee []uint64) (*ethpb.IndexedAttestation, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ConvertToIndexed")
|
||||
defer span.End()
|
||||
|
||||
attIndices, err := helpers.AttestingIndices(state, attestation.Data, attestation.AggregationBits)
|
||||
attIndices, err := helpers.AttestingIndices(attestation.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attesting indices")
|
||||
}
|
||||
cb1i, err := helpers.AttestingIndices(state, attestation.Data, attestation.CustodyBits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !sliceutil.SubsetUint64(cb1i, attIndices) {
|
||||
return nil, fmt.Errorf("%v is not a subset of %v", cb1i, attIndices)
|
||||
}
|
||||
cb1Map := make(map[uint64]bool)
|
||||
for _, idx := range cb1i {
|
||||
cb1Map[idx] = true
|
||||
}
|
||||
cb0i := []uint64{}
|
||||
for _, idx := range attIndices {
|
||||
if !cb1Map[idx] {
|
||||
cb0i = append(cb0i, idx)
|
||||
}
|
||||
}
|
||||
sort.Slice(cb0i, func(i, j int) bool {
|
||||
return cb0i[i] < cb0i[j]
|
||||
})
|
||||
|
||||
sort.Slice(cb1i, func(i, j int) bool {
|
||||
return cb1i[i] < cb1i[j]
|
||||
sort.Slice(attIndices, func(i, j int) bool {
|
||||
return attIndices[i] < attIndices[j]
|
||||
})
|
||||
inAtt := ðpb.IndexedAttestation{
|
||||
Data: attestation.Data,
|
||||
Signature: attestation.Signature,
|
||||
CustodyBit_0Indices: cb0i,
|
||||
CustodyBit_1Indices: cb1i,
|
||||
Data: attestation.Data,
|
||||
Signature: attestation.Signature,
|
||||
AttestingIndices: attIndices,
|
||||
}
|
||||
return inAtt, nil
|
||||
}
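ConvertToIndexed now receives the committee directly, so the conversion reduces to reading the aggregation bitfield against that committee. A rough sketch of that mapping, using a plain bool slice in place of the go-bitfield type the real helpers.AttestingIndices works with:

package sketch

import "sort"

// attestingIndices collects the committee members whose aggregation bit is set
// and returns them sorted, which is the list an IndexedAttestation carries.
func attestingIndices(aggregationBits []bool, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, validatorIndex := range committee {
		if i < len(aggregationBits) && aggregationBits[i] {
			indices = append(indices, validatorIndex)
		}
	}
	sort.Slice(indices, func(i, j int) bool { return indices[i] < indices[j] })
	return indices
}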
|
||||
@@ -748,33 +695,19 @@ func ConvertToIndexed(ctx context.Context, state *pb.BeaconState, attestation *e
|
||||
// """
|
||||
// Check if ``indexed_attestation`` has valid indices and signature.
|
||||
// """
|
||||
// bit_0_indices = indexed_attestation.custody_bit_0_indices
|
||||
// bit_1_indices = indexed_attestation.custody_bit_1_indices
|
||||
// indices = indexed_attestation.attesting_indices
|
||||
//
|
||||
// # Verify no index has custody bit equal to 1 [to be removed in phase 1]
|
||||
// if not len(bit_1_indices) == 0:
|
||||
// return False
|
||||
// # Verify max number of indices
|
||||
// if not len(bit_0_indices) + len(bit_1_indices) <= MAX_VALIDATORS_PER_COMMITTEE:
|
||||
// return False
|
||||
// # Verify index sets are disjoint
|
||||
// if not len(set(bit_0_indices).intersection(bit_1_indices)) == 0:
|
||||
// return False
|
||||
// # Verify indices are sorted
|
||||
// if not (bit_0_indices == sorted(bit_0_indices) and bit_1_indices == sorted(bit_1_indices)):
|
||||
// if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE:
|
||||
// return False
|
||||
// # Verify indices are sorted and unique
|
||||
// if not indices == sorted(set(indices)):
|
||||
// # Verify aggregate signature
|
||||
// if not bls_verify_multiple(
|
||||
// pubkeys=[
|
||||
// bls_aggregate_pubkeys([state.validators[i].pubkey for i in bit_0_indices]),
|
||||
// bls_aggregate_pubkeys([state.validators[i].pubkey for i in bit_1_indices]),
|
||||
// ],
|
||||
// message_hashes=[
|
||||
// hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)),
|
||||
// hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)),
|
||||
// ],
|
||||
// if not bls_verify(
|
||||
// pubkey=bls_aggregate_pubkeys([state.validators[i].pubkey for i in indices]),
|
||||
// message_hash=hash_tree_root(indexed_attestation.data),
|
||||
// signature=indexed_attestation.signature,
|
||||
// domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target.epoch),
|
||||
// domain=get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch),
|
||||
// ):
|
||||
// return False
|
||||
// return True
|
||||
@@ -782,87 +715,48 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *pb.BeaconState,
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
|
||||
defer span.End()
|
||||
|
||||
custodyBit0Indices := indexedAtt.CustodyBit_0Indices
|
||||
custodyBit1Indices := indexedAtt.CustodyBit_1Indices
|
||||
indices := indexedAtt.AttestingIndices
|
||||
|
||||
// To be removed in phase 1
|
||||
if len(custodyBit1Indices) != 0 {
|
||||
return fmt.Errorf("expected no bit 1 indices, received %v", len(custodyBit1Indices))
|
||||
if uint64(len(indices)) > params.BeaconConfig().MaxValidatorsPerCommittee {
|
||||
return fmt.Errorf("validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE, %d > %d", len(indices), params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
}
|
||||
|
||||
maxIndices := params.BeaconConfig().MaxValidatorsPerCommittee
|
||||
totalIndicesLength := uint64(len(custodyBit0Indices) + len(custodyBit1Indices))
|
||||
if totalIndicesLength > maxIndices {
|
||||
return fmt.Errorf("over max number of allowed indices per attestation: %d", totalIndicesLength)
|
||||
set := make(map[uint64]bool)
|
||||
setIndices := make([]uint64, 0, len(indices))
|
||||
for _, i := range indices {
|
||||
if ok := set[i]; ok {
|
||||
continue
|
||||
}
|
||||
setIndices = append(setIndices, i)
|
||||
set[i] = true
|
||||
}
|
||||
custodyBitIntersection := sliceutil.IntersectionUint64(custodyBit0Indices, custodyBit1Indices)
|
||||
if len(custodyBitIntersection) != 0 {
|
||||
return fmt.Errorf("expected disjoint indices intersection, received %v", custodyBitIntersection)
|
||||
}
|
||||
|
||||
custodyBit0IndicesIsSorted := sort.SliceIsSorted(custodyBit0Indices, func(i, j int) bool {
|
||||
return custodyBit0Indices[i] < custodyBit0Indices[j]
|
||||
sort.SliceStable(setIndices, func(i, j int) bool {
|
||||
return setIndices[i] < setIndices[j]
|
||||
})
|
||||
|
||||
if !custodyBit0IndicesIsSorted {
|
||||
return fmt.Errorf("custody Bit0 indices are not sorted, got %v", custodyBit0Indices)
|
||||
if !reflect.DeepEqual(setIndices, indices) {
|
||||
return errors.New("attesting indices is not uniquely sorted")
|
||||
}
|
||||
|
||||
custodyBit1IndicesIsSorted := sort.SliceIsSorted(custodyBit1Indices, func(i, j int) bool {
|
||||
return custodyBit1Indices[i] < custodyBit1Indices[j]
|
||||
})
|
||||
|
||||
if !custodyBit1IndicesIsSorted {
|
||||
return fmt.Errorf("custody Bit1 indices are not sorted, got %v", custodyBit1Indices)
|
||||
}
|
||||
|
||||
domain := helpers.Domain(beaconState.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainAttestation)
|
||||
var pubkeys []*bls.PublicKey
|
||||
if len(custodyBit0Indices) > 0 {
|
||||
pubkey, err := bls.PublicKeyFromBytes(beaconState.Validators[custodyBit0Indices[0]].PublicKey)
|
||||
domain := helpers.Domain(beaconState.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
var pubkey *bls.PublicKey
|
||||
var err error
|
||||
if len(indices) > 0 {
|
||||
pubkey, err = bls.PublicKeyFromBytes(beaconState.Validators[indices[0]].PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
}
|
||||
for _, i := range custodyBit0Indices[1:] {
|
||||
for _, i := range indices[1:] {
|
||||
pk, err := bls.PublicKeyFromBytes(beaconState.Validators[i].PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
}
|
||||
pubkey.Aggregate(pk)
|
||||
}
|
||||
pubkeys = append(pubkeys, pubkey)
|
||||
}
|
||||
if len(custodyBit1Indices) > 0 {
|
||||
pubkey, err := bls.PublicKeyFromBytes(beaconState.Validators[custodyBit1Indices[0]].PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
}
|
||||
for _, i := range custodyBit1Indices[1:] {
|
||||
pk, err := bls.PublicKeyFromBytes(beaconState.Validators[i].PublicKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
}
|
||||
pubkey.Aggregate(pk)
|
||||
}
|
||||
pubkeys = append(pubkeys, pubkey)
|
||||
}
|
||||
|
||||
var msgs [][32]byte
|
||||
cus0 := &pb.AttestationDataAndCustodyBit{Data: indexedAtt.Data, CustodyBit: false}
|
||||
cus1 := &pb.AttestationDataAndCustodyBit{Data: indexedAtt.Data, CustodyBit: true}
|
||||
if len(custodyBit0Indices) > 0 {
|
||||
cus0Root, err := ssz.HashTreeRoot(cus0)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash att data and custody bit 0")
|
||||
}
|
||||
msgs = append(msgs, cus0Root)
|
||||
}
|
||||
if len(custodyBit1Indices) > 0 {
|
||||
cus1Root, err := ssz.HashTreeRoot(cus1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash att data and custody bit 1")
|
||||
}
|
||||
msgs = append(msgs, cus1Root)
|
||||
messageHash, err := ssz.HashTreeRoot(indexedAtt.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash att data")
|
||||
}
|
||||
|
||||
sig, err := bls.SignatureFromBytes(indexedAtt.Signature)
|
||||
@@ -870,10 +764,9 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *pb.BeaconState,
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
|
||||
hasVotes := len(custodyBit0Indices) > 0 || len(custodyBit1Indices) > 0
|
||||
|
||||
if hasVotes && !sig.VerifyAggregate(pubkeys, msgs, domain) {
|
||||
return fmt.Errorf("attestation aggregation signature did not verify")
|
||||
voted := len(indices) > 0
|
||||
if voted && !sig.Verify(messageHash[:], pubkey, domain) {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
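The replacement code checks that the attesting indices are sorted and unique by deduplicating, sorting, and comparing with reflect.DeepEqual. For a slice of uint64 the same property can be verified in a single pass; the sketch below is an alternative formulation, not the code the diff adds, and it avoids the extra allocations of the copy-and-compare approach.

package sketch

import "errors"

// checkSortedAndUnique verifies the indices are strictly increasing, which is
// equivalent to "sorted and unique" for integer indices.
func checkSortedAndUnique(indices []uint64) error {
	for i := 1; i < len(indices); i++ {
		if indices[i] <= indices[i-1] {
			return errors.New("attesting indices is not uniquely sorted")
		}
	}
	return nil
}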
|
||||
@@ -881,7 +774,11 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *pb.BeaconState,
|
||||
// VerifyAttestation converts an attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestation(ctx context.Context, beaconState *pb.BeaconState, att *ethpb.Attestation) error {
|
||||
indexedAtt, err := ConvertToIndexed(ctx, beaconState, att)
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert to indexed attestation")
|
||||
}
|
||||
@@ -909,51 +806,72 @@ func ProcessDeposits(ctx context.Context, beaconState *pb.BeaconState, body *eth
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessPreGenesisDeposit processes a deposit for the beacon state before chainstart.
|
||||
func ProcessPreGenesisDeposit(ctx context.Context, beaconState *pb.BeaconState,
|
||||
deposit *ethpb.Deposit, validatorIndices map[[48]byte]int) (*pb.BeaconState, error) {
|
||||
var err error
|
||||
beaconState, err = ProcessDeposit(beaconState, deposit, validatorIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit")
|
||||
}
|
||||
pubkey := deposit.Data.PublicKey
|
||||
index, ok := validatorIndices[bytesutil.ToBytes48(pubkey)]
|
||||
if !ok {
|
||||
return beaconState, nil
|
||||
}
|
||||
balance := beaconState.Balances[index]
|
||||
beaconState.Validators[index].EffectiveBalance = mathutil.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
|
||||
if beaconState.Validators[index].EffectiveBalance ==
|
||||
params.BeaconConfig().MaxEffectiveBalance {
|
||||
beaconState.Validators[index].ActivationEligibilityEpoch = 0
|
||||
beaconState.Validators[index].ActivationEpoch = 0
|
||||
}
|
||||
return beaconState, nil
|
||||
}
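The pre-genesis deposit path sets the effective balance with a round-down-and-cap rule. Written out with the constants as parameters (the values below are illustrative, not read from the config): with mainnet-style numbers, a 32,000,000,001 Gwei balance, a 1,000,000,000 Gwei increment, and a 32,000,000,000 Gwei cap yield exactly 32,000,000,000, which is also the condition under which the validator is activated at genesis.

package sketch

// effectiveBalance rounds balance down to a multiple of increment and caps the
// result at maxEffective, matching the assignment in ProcessPreGenesisDeposit.
func effectiveBalance(balance, increment, maxEffective uint64) uint64 {
	eb := balance - balance%increment
	if eb > maxEffective {
		return maxEffective
	}
	return eb
}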
|
||||
|
||||
// ProcessDeposit takes in a deposit object and inserts it
|
||||
// into the registry as a new validator or balance change.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
// """
|
||||
// Process an Eth1 deposit, registering a validator or increasing its balance.
|
||||
// """
|
||||
// # Verify the Merkle branch
|
||||
// assert verify_merkle_branch(
|
||||
// leaf=hash_tree_root(deposit.data),
|
||||
// proof=deposit.proof,
|
||||
// depth=DEPOSIT_CONTRACT_TREE_DEPTH,
|
||||
// index=deposit.index,
|
||||
// root=state.latest_eth1_data.deposit_root,
|
||||
// )
|
||||
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
// # Verify the Merkle branch
|
||||
// assert is_valid_merkle_branch(
|
||||
// leaf=hash_tree_root(deposit.data),
|
||||
// branch=deposit.proof,
|
||||
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the `List` length mix-in
|
||||
// index=state.eth1_deposit_index,
|
||||
// root=state.eth1_data.deposit_root,
|
||||
// )
|
||||
//
|
||||
// # Deposits must be processed in order
|
||||
// assert deposit.index == state.deposit_index
|
||||
// state.deposit_index += 1
|
||||
// # Deposits must be processed in order
|
||||
// state.eth1_deposit_index += 1
|
||||
//
|
||||
// pubkey = deposit.data.pubkey
|
||||
// amount = deposit.data.amount
|
||||
// validator_pubkeys = [v.pubkey for v in state.validator_registry]
|
||||
// if pubkey not in validator_pubkeys:
|
||||
// # Verify the deposit signature (proof of possession).
|
||||
// # Invalid signatures are allowed by the deposit contract, and hence included on-chain, but must not be processed.
|
||||
// if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)):
|
||||
// return
|
||||
// pubkey = deposit.data.pubkey
|
||||
// amount = deposit.data.amount
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// if pubkey not in validator_pubkeys:
|
||||
// # Verify the deposit signature (proof of possession) for new validators.
|
||||
// # Note: The deposit contract does not check signatures.
|
||||
// # Note: Deposits are valid across forks, thus the deposit domain is retrieved directly from `compute_domain`.
|
||||
// domain = compute_domain(DOMAIN_DEPOSIT)
|
||||
// if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, domain):
|
||||
// return
|
||||
//
|
||||
// # Add validator and balance entries
|
||||
// state.validator_registry.append(Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=deposit.data.withdrawal_credentials,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
||||
// ))
|
||||
// state.balances.append(amount)
|
||||
// else:
|
||||
// # Increase balance by deposit amount
|
||||
// index = validator_pubkeys.index(pubkey)
|
||||
// increase_balance(state, index, amount)
|
||||
// # Add validator and balance entries
|
||||
// state.validators.append(Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=deposit.data.withdrawal_credentials,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE),
|
||||
// ))
|
||||
// state.balances.append(amount)
|
||||
// else:
|
||||
// # Increase balance by deposit amount
|
||||
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
|
||||
// increase_balance(state, index, amount)
|
||||
func ProcessDeposit(beaconState *pb.BeaconState, deposit *ethpb.Deposit, valIndexMap map[[48]byte]int) (*pb.BeaconState, error) {
|
||||
if err := verifyDeposit(beaconState, deposit); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
|
||||
@@ -963,9 +881,9 @@ func ProcessDeposit(beaconState *pb.BeaconState, deposit *ethpb.Deposit, valInde
|
||||
amount := deposit.Data.Amount
|
||||
index, ok := valIndexMap[bytesutil.ToBytes48(pubKey)]
|
||||
if !ok {
|
||||
domain := helpers.Domain(beaconState.Fork, helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainDeposit)
|
||||
domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit)
|
||||
depositSig := deposit.Data.Signature
|
||||
if err := verifySigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
|
||||
if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
|
||||
// Ignore this error as in the spec pseudo code.
|
||||
log.Errorf("Skipping deposit: could not verify deposit data signature: %v", err)
|
||||
return beaconState, nil
|
||||
@@ -985,6 +903,7 @@ func ProcessDeposit(beaconState *pb.BeaconState, deposit *ethpb.Deposit, valInde
|
||||
EffectiveBalance: effectiveBalance,
|
||||
})
|
||||
beaconState.Balances = append(beaconState.Balances, amount)
|
||||
valIndexMap[bytesutil.ToBytes48(pubKey)] = len(beaconState.Validators) - 1
|
||||
} else {
|
||||
beaconState = helpers.IncreaseBalance(beaconState, uint64(index), amount)
|
||||
}
|
||||
@@ -1044,7 +963,7 @@ func ProcessVoluntaryExits(ctx context.Context, beaconState *pb.BeaconState, bod
|
||||
if err := VerifyExit(beaconState, exit); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
|
||||
}
|
||||
beaconState, err = v.InitiateValidatorExit(beaconState, exit.ValidatorIndex)
|
||||
beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1062,7 +981,7 @@ func ProcessVoluntaryExitsNoVerify(
|
||||
exits := body.VoluntaryExits
|
||||
|
||||
for idx, exit := range exits {
|
||||
beaconState, err = v.InitiateValidatorExit(beaconState, exit.ValidatorIndex)
|
||||
beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to process voluntary exit at index %d", idx)
|
||||
}
|
||||
@@ -1089,7 +1008,12 @@ func ProcessVoluntaryExitsNoVerify(
|
||||
// # Verify signature
|
||||
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch)
|
||||
// assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain)
|
||||
func VerifyExit(beaconState *pb.BeaconState, exit *ethpb.VoluntaryExit) error {
|
||||
func VerifyExit(beaconState *pb.BeaconState, signed *ethpb.SignedVoluntaryExit) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
|
||||
exit := signed.Exit
|
||||
if int(exit.ValidatorIndex) >= len(beaconState.Validators) {
|
||||
return fmt.Errorf("validator index out of bound %d > %d", exit.ValidatorIndex, len(beaconState.Validators))
|
||||
}
|
||||
@@ -1117,132 +1041,8 @@ func VerifyExit(beaconState *pb.BeaconState, exit *ethpb.VoluntaryExit) error {
|
||||
)
|
||||
}
|
||||
domain := helpers.Domain(beaconState.Fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit)
|
||||
if err := verifySigningRoot(exit, validator.PublicKey, exit.Signature, domain); err != nil {
|
||||
return errors.Wrap(err, "could not verify voluntary exit signature")
|
||||
}
|
||||
return nil
|
||||
}
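VerifyExit now takes a SignedVoluntaryExit and walks through a series of validity checks before verifying the signature. A condensed sketch of those checks, with hypothetical plain structs in place of the protobuf types; the minimum-active-period check and the BLS verification itself are left out.

package sketch

import "fmt"

type exitValidator struct {
	ActivationEpoch uint64
	ExitEpoch       uint64
}

type voluntaryExit struct {
	ValidatorIndex uint64
	Epoch          uint64
}

// verifyExitChecks mirrors the bound, activity, and timing checks that precede
// the signature verification. farFutureEpoch marks a validator that has not exited.
func verifyExitChecks(validators []exitValidator, currentEpoch, farFutureEpoch uint64, e voluntaryExit) error {
	if int(e.ValidatorIndex) >= len(validators) {
		return fmt.Errorf("validator index out of bound %d >= %d", e.ValidatorIndex, len(validators))
	}
	v := validators[e.ValidatorIndex]
	if v.ActivationEpoch > currentEpoch || v.ExitEpoch != farFutureEpoch {
		return fmt.Errorf("validator %d is not active or has already initiated an exit", e.ValidatorIndex)
	}
	if currentEpoch < e.Epoch {
		return fmt.Errorf("exit epoch %d has not been reached at current epoch %d", e.Epoch, currentEpoch)
	}
	return nil
}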
|
||||
|
||||
// ProcessTransfers is one of the operations performed
|
||||
// on each processed beacon block to determine transfers between beacon chain balances.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_transfer(state: BeaconState, transfer: Transfer) -> None:
|
||||
// """
|
||||
// Process ``Transfer`` operation.
|
||||
// """
|
||||
// # Verify the balance covers the amount and fee (with overflow protection)
|
||||
// assert state.balances[transfer.sender] >= max(transfer.amount + transfer.fee, transfer.amount, transfer.fee)
|
||||
// # A transfer is valid in only one slot
|
||||
// assert state.slot == transfer.slot
|
||||
// # SenderIndex must satisfy at least one of the following conditions in the parenthesis:
|
||||
// assert (
|
||||
// # * Has not been activated
|
||||
// state.validator_registry[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or
|
||||
// # * Is withdrawable
|
||||
// get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or
|
||||
// # * Balance after transfer is more than the effective balance threshold
|
||||
// transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= state.balances[transfer.sender]
|
||||
// )
|
||||
// # Verify that the pubkey is valid
|
||||
// assert (
|
||||
// state.validator_registry[transfer.sender].withdrawal_credentials ==
|
||||
// int_to_bytes(BLS_WITHDRAWAL_PREFIX, length=1) + hash(transfer.pubkey)[1:]
|
||||
// )
|
||||
// # Verify that the signature is valid
|
||||
// assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER))
|
||||
// # Process the transfer
|
||||
// decrease_balance(state, transfer.sender, transfer.amount + transfer.fee)
|
||||
// increase_balance(state, transfer.recipient, transfer.amount)
|
||||
// increase_balance(state, get_beacon_proposer_index(state), transfer.fee)
|
||||
// # Verify balances are not dust
|
||||
// assert not (0 < state.balances[transfer.sender] < MIN_DEPOSIT_AMOUNT)
|
||||
// assert not (0 < state.balances[transfer.recipient] < MIN_DEPOSIT_AMOUNT)
|
||||
func ProcessTransfers(
|
||||
beaconState *pb.BeaconState,
|
||||
body *ethpb.BeaconBlockBody,
|
||||
) (*pb.BeaconState, error) {
|
||||
transfers := body.Transfers
|
||||
|
||||
for idx, transfer := range transfers {
|
||||
if err := verifyTransfer(beaconState, transfer); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify transfer %d", idx)
|
||||
}
|
||||
// Process the transfer between accounts.
|
||||
beaconState = helpers.DecreaseBalance(beaconState, transfer.SenderIndex, transfer.Amount+transfer.Fee)
|
||||
beaconState = helpers.IncreaseBalance(beaconState, transfer.RecipientIndex, transfer.Amount)
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(beaconState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not determine beacon proposer index")
|
||||
}
|
||||
beaconState = helpers.IncreaseBalance(beaconState, proposerIndex, transfer.Fee)
|
||||
|
||||
// Finally, we verify balances will not go below the minimum.
|
||||
if beaconState.Balances[transfer.SenderIndex] < params.BeaconConfig().MinDepositAmount &&
|
||||
0 < beaconState.Balances[transfer.SenderIndex] {
|
||||
return nil, fmt.Errorf(
|
||||
"sender balance below critical level: %v",
|
||||
beaconState.Balances[transfer.SenderIndex],
|
||||
)
|
||||
}
|
||||
if beaconState.Balances[transfer.RecipientIndex] < params.BeaconConfig().MinDepositAmount &&
|
||||
0 < beaconState.Balances[transfer.RecipientIndex] {
|
||||
return nil, fmt.Errorf(
|
||||
"recipient balance below critical level: %v",
|
||||
beaconState.Balances[transfer.RecipientIndex],
|
||||
)
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
func verifyTransfer(beaconState *pb.BeaconState, transfer *ethpb.Transfer) error {
|
||||
if transfer.SenderIndex > uint64(len(beaconState.Validators)) {
|
||||
return errors.New("transfer sender index out of bounds in validator registry")
|
||||
}
|
||||
|
||||
maxVal := transfer.Fee
|
||||
if transfer.Amount > maxVal {
|
||||
maxVal = transfer.Amount
|
||||
}
|
||||
if transfer.Amount+transfer.Fee > maxVal {
|
||||
maxVal = transfer.Amount + transfer.Fee
|
||||
}
|
||||
sender := beaconState.Validators[transfer.SenderIndex]
|
||||
senderBalance := beaconState.Balances[transfer.SenderIndex]
|
||||
// Verify the balance covers the amount and fee (with overflow protection).
|
||||
if senderBalance < maxVal {
|
||||
return fmt.Errorf("expected sender balance %d >= %d", senderBalance, maxVal)
|
||||
}
|
||||
// A transfer is valid in only one slot.
|
||||
if beaconState.Slot != transfer.Slot {
|
||||
return fmt.Errorf("expected beacon state slot %d == transfer slot %d", beaconState.Slot, transfer.Slot)
|
||||
}
|
||||
|
||||
// Sender must be not yet eligible for activation, withdrawn, or transfer balance over MAX_EFFECTIVE_BALANCE.
|
||||
senderNotActivationEligible := sender.ActivationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch
|
||||
senderNotWithdrawn := helpers.CurrentEpoch(beaconState) >= sender.WithdrawableEpoch
|
||||
underMaxTransfer := transfer.Amount+transfer.Fee+params.BeaconConfig().MaxEffectiveBalance <= senderBalance
|
||||
|
||||
if !(senderNotActivationEligible || senderNotWithdrawn || underMaxTransfer) {
|
||||
return fmt.Errorf(
|
||||
"expected activation eligiblity: false or withdrawn: false or over max transfer: false, received %v %v %v",
|
||||
senderNotActivationEligible,
|
||||
senderNotWithdrawn,
|
||||
underMaxTransfer,
|
||||
)
|
||||
}
|
||||
// Verify that the pubkey is valid.
|
||||
buf := []byte{params.BeaconConfig().BLSWithdrawalPrefixByte}
|
||||
hashed := hashutil.Hash(transfer.SenderWithdrawalPublicKey)
|
||||
buf = append(buf, hashed[:][1:]...)
|
||||
if !bytes.Equal(sender.WithdrawalCredentials, buf) {
|
||||
return fmt.Errorf("invalid public key, expected %v, received %v", buf, sender.WithdrawalCredentials)
|
||||
}
|
||||
|
||||
domain := helpers.Domain(beaconState.Fork, helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainTransfer)
|
||||
if err := verifySigningRoot(transfer, transfer.SenderWithdrawalPublicKey, transfer.Signature, domain); err != nil {
|
||||
return errors.Wrap(err, "could not verify transfer signature")
|
||||
if err := verifySigningRoot(exit, validator.PublicKey, signed.Signature, domain); err != nil {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
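The first check in verifyTransfer builds max(amount+fee, amount, fee) instead of testing amount+fee directly; comparing against the maximum of all three terms keeps the guard meaningful even if the uint64 sum wraps around. A compact version of that guard:

package sketch

import "fmt"

// checkTransferBalance verifies balance >= max(amount+fee, amount, fee),
// the overflow-protected form of the sender-balance check.
func checkTransferBalance(balance, amount, fee uint64) error {
	maxVal := fee
	if amount > maxVal {
		maxVal = amount
	}
	if sum := amount + fee; sum > maxVal {
		maxVal = sum
	}
	if balance < maxVal {
		return fmt.Errorf("expected sender balance %d >= %d", balance, maxVal)
	}
	return nil
}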
|
||||
|
||||
beacon-chain/core/blocks/block_operations_fuzz_test.go (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ctx := context.Background()
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
att := ð.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
_, _ = blocks.ProcessAttestationNoVerify(ctx, state, att)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
block := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
_, _ = blocks.ProcessBlockHeader(state, block)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -11,11 +11,11 @@ func TestGenesisBlock_InitializedCorrectly(t *testing.T) {
|
||||
stateHash := []byte{0}
|
||||
b1 := blocks.NewGenesisBlock(stateHash)
|
||||
|
||||
if b1.ParentRoot == nil {
|
||||
if b1.Block.ParentRoot == nil {
|
||||
t.Error("genesis block missing ParentHash field")
|
||||
}
|
||||
|
||||
if !bytes.Equal(b1.StateRoot, stateHash) {
|
||||
if !bytes.Equal(b1.Block.StateRoot, stateHash) {
|
||||
t.Error("genesis block StateRootHash32 isn't initialized correctly")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
|
||||
@@ -36,10 +36,10 @@ go_test(
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params/spectest:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
@@ -69,10 +69,10 @@ go_test(
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params/spectest:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -8,11 +8,10 @@ import (
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"gopkg.in/d4l3k/messagediff.v1"
|
||||
@@ -26,7 +25,6 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
testFolders, testsFolderPath := testutil.TestFolders(t, config, "operations/block_header/pyspec_tests")
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
blockFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "block.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -54,7 +52,8 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, err := blocks.ProcessBlockHeader(preBeaconState, block)
|
||||
// Spectest blocks are not signed, so we'll call NoVerify to skip sig verification.
|
||||
beaconState, err := blocks.ProcessBlockHeaderNoVerify(preBeaconState, block)
|
||||
if postSSZExists {
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
|
||||
@@ -10,11 +10,10 @@ import (
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"gopkg.in/d4l3k/messagediff.v1"
|
||||
@@ -28,7 +27,6 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
testFolders, testsFolderPath := testutil.TestFolders(t, config, "sanity/blocks/pyspec_tests")
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
preBeaconStateFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -55,11 +53,14 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
block := ðpb.BeaconBlock{}
|
||||
block := ðpb.SignedBeaconBlock{}
|
||||
if err := ssz.Unmarshal(blockFile, block); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
beaconState, transitionError = state.ExecuteStateTransition(context.Background(), beaconState, block)
|
||||
if transitionError != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If the post.ssz is not present, it means the test should fail on our end.
|
||||
@@ -73,7 +74,7 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
|
||||
if postSSZExists {
|
||||
if transitionError != nil {
|
||||
t.Fatalf("Unexpected error: %v", transitionError)
|
||||
t.Errorf("Unexpected error: %v", transitionError)
|
||||
}
|
||||
|
||||
postBeaconStateFile, err := ioutil.ReadFile(postSSZFilepath)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
@@ -24,12 +24,12 @@ func runVoluntaryExitTest(t *testing.T, config string) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
voluntaryExit := ðpb.VoluntaryExit{}
|
||||
voluntaryExit := ðpb.SignedVoluntaryExit{}
|
||||
if err := ssz.Unmarshal(exitFile, voluntaryExit); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
|
||||
body := ðpb.BeaconBlockBody{VoluntaryExits: []*ethpb.VoluntaryExit{voluntaryExit}}
|
||||
body := ðpb.BeaconBlockBody{VoluntaryExits: []*ethpb.SignedVoluntaryExit{voluntaryExit}}
|
||||
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessVoluntaryExits)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,21 +2,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"epoch_processing.go",
|
||||
"participation.go",
|
||||
],
|
||||
srcs = ["epoch_processing.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -25,16 +21,17 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"epoch_processing_fuzz_test.go",
|
||||
"epoch_processing_test.go",
|
||||
"participation_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -5,101 +5,32 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// MatchedAttestations is an object that contains the correctly
|
||||
// voted attestations based on source, target and head criteria.
|
||||
type MatchedAttestations struct {
|
||||
source []*pb.PendingAttestation
|
||||
Target []*pb.PendingAttestation
|
||||
head []*pb.PendingAttestation
|
||||
}
|
||||
var epochState *pb.BeaconState
|
||||
|
||||
// MatchAttestations matches the attestations gathered in a span of an epoch
|
||||
// and categorize them whether they correctly voted for source, target and head.
|
||||
// We combine the individual helpers from the spec for efficiency and to achieve O(N) run time.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// assert epoch in (get_current_epoch(state), get_previous_epoch(state))
|
||||
// return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations
|
||||
//
|
||||
// def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// return [
|
||||
// a for a in get_matching_source_attestations(state, epoch)
|
||||
// if a.data.target_root == get_block_root(state, epoch)
|
||||
// ]
|
||||
//
|
||||
// def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// return [
|
||||
// a for a in get_matching_source_attestations(state, epoch)
|
||||
// if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data))
|
||||
// ]
|
||||
func MatchAttestations(state *pb.BeaconState, epoch uint64) (*MatchedAttestations, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
previousEpoch := helpers.PrevEpoch(state)
|
||||
// sortableIndices implements the Sort interface to sort newly activated validator indices
|
||||
// by activation epoch and by index number.
|
||||
type sortableIndices []uint64
|
||||
|
||||
// Input epoch for matching the source attestations has to be within range
|
||||
// of current epoch & previous epoch.
|
||||
if epoch != currentEpoch && epoch != previousEpoch {
|
||||
return nil, fmt.Errorf("input epoch: %d != current epoch: %d or previous epoch: %d",
|
||||
epoch, currentEpoch, previousEpoch)
|
||||
func (s sortableIndices) Len() int { return len(s) }
|
||||
func (s sortableIndices) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s sortableIndices) Less(i, j int) bool {
|
||||
if epochState.Validators[s[i]].ActivationEligibilityEpoch == epochState.Validators[s[j]].ActivationEligibilityEpoch {
|
||||
return s[i] < s[j]
|
||||
}
|
||||
|
||||
// Decide if the source attestations are coming from current or previous epoch.
|
||||
var srcAtts []*pb.PendingAttestation
|
||||
if epoch == currentEpoch {
|
||||
srcAtts = state.CurrentEpochAttestations
|
||||
} else {
|
||||
srcAtts = state.PreviousEpochAttestations
|
||||
}
|
||||
targetRoot, err := helpers.BlockRoot(state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for epoch %d", epoch)
|
||||
}
|
||||
|
||||
tgtAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
|
||||
headAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
|
||||
for _, srcAtt := range srcAtts {
|
||||
// If the target root matches attestation's target root,
|
||||
// then we know this attestation has correctly voted for target.
|
||||
if bytes.Equal(srcAtt.Data.Target.Root, targetRoot) {
|
||||
tgtAtts = append(tgtAtts, srcAtt)
|
||||
}
|
||||
|
||||
// If the block root at slot matches attestation's block root at slot,
|
||||
// then we know this attestation has correctly voted for head.
|
||||
slot, err := helpers.AttestationDataSlot(state, srcAtt.Data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation slot")
|
||||
}
|
||||
headRoot, err := helpers.BlockRootAtSlot(state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for slot %d", slot)
|
||||
}
|
||||
if bytes.Equal(srcAtt.Data.BeaconBlockRoot, headRoot) {
|
||||
headAtts = append(headAtts, srcAtt)
|
||||
}
|
||||
}
|
||||
|
||||
return &MatchedAttestations{
|
||||
source: srcAtts,
|
||||
Target: tgtAtts,
|
||||
head: headAtts,
|
||||
}, nil
|
||||
return epochState.Validators[s[i]].ActivationEligibilityEpoch < epochState.Validators[s[j]].ActivationEligibilityEpoch
|
||||
}
|
||||
|
||||
// AttestingBalance returns the total balance from all the attesting indices.
|
||||
@@ -119,235 +50,39 @@ func AttestingBalance(state *pb.BeaconState, atts []*pb.PendingAttestation) (uin
|
||||
return helpers.TotalBalance(state, indices), nil
|
||||
}
|
||||
|
||||
// ProcessJustificationAndFinalization processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
// if get_current_epoch(state) <= GENESIS_EPOCH + 1:
|
||||
// return
|
||||
//
|
||||
// previous_epoch = get_previous_epoch(state)
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// old_previous_justified_checkpoint = state.previous_justified_checkpoint
|
||||
// old_current_justified_checkpoint = state.current_justified_checkpoint
|
||||
//
|
||||
// # Process justifications
|
||||
// state.previous_justified_checkpoint = state.current_justified_checkpoint
|
||||
// state.justification_bits[1:] = state.justification_bits[:-1]
|
||||
// state.justification_bits[0] = 0b0
|
||||
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch) # Previous epoch
|
||||
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
|
||||
// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
|
||||
// root=get_block_root(state, previous_epoch))
|
||||
// state.justification_bits[1] = 0b1
|
||||
// matching_target_attestations = get_matching_target_attestations(state, current_epoch) # Current epoch
|
||||
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
|
||||
// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
|
||||
// root=get_block_root(state, current_epoch))
|
||||
// state.justification_bits[0] = 0b1
|
||||
//
|
||||
// # Process finalizations
|
||||
// bits = state.justification_bits
|
||||
// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
|
||||
// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
|
||||
// state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
|
||||
// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
// state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
|
||||
// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
func ProcessJustificationAndFinalization(state *pb.BeaconState, prevAttestedBal uint64, currAttestedBal uint64) (*pb.BeaconState, error) {
|
||||
	if state.Slot <= helpers.StartSlot(2) {
		return state, nil
	}

	prevEpoch := helpers.PrevEpoch(state)
	currentEpoch := helpers.CurrentEpoch(state)
	oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint
	oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint

	totalBal, err := helpers.TotalActiveBalance(state)
	if err != nil {
		return nil, errors.Wrap(err, "could not get total balance")
	}

	// Process justifications
	state.PreviousJustifiedCheckpoint = state.CurrentJustifiedCheckpoint
	state.JustificationBits.Shift(1)

	// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
	// We will use that paradigm here for consistency with the godoc spec definition.

	// If 2/3 or more of the total balance attested in the previous epoch.
	if 3*prevAttestedBal >= 2*totalBal {
		blockRoot, err := helpers.BlockRoot(state, prevEpoch)
		if err != nil {
			return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
		}
		state.CurrentJustifiedCheckpoint = &ethpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}
		state.JustificationBits.SetBitAt(1, true)
	}

	// If 2/3 or more of the total balance attested in the current epoch.
	if 3*currAttestedBal >= 2*totalBal {
		blockRoot, err := helpers.BlockRoot(state, currentEpoch)
		if err != nil {
			return nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
		}
		state.CurrentJustifiedCheckpoint = &ethpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}
		state.JustificationBits.SetBitAt(0, true)
	}

	// Process finalization according to the ETH2.0 specification.
	justification := state.JustificationBits.Bytes()[0]

	// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
	if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
		state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
	}

	// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
	if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
		state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
	}

	// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
	if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
		state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
	}

	// 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source.
	if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
		state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
	}

	return state, nil
}
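// Illustrative standalone sketch (not part of the Prysm source, values assumed): how the
// bitmask constants used above line up with the spec's justification_bits slices. Bit 0
// tracks the current epoch, bit 1 the previous epoch, and so on.
package main

import "fmt"

func main() {
	// Assume the three most recent epochs were justified: bits 0, 1 and 2 are set.
	justification := byte(0x07)

	// all(bits[1:4]) needs bits 1, 2 and 3 set, i.e. mask 0b1110 (0x0E).
	fmt.Println("bits[1:4] justified:", justification&0x0E == 0x0E) // false: bit 3 unset

	// all(bits[0:3]) needs bits 0, 1 and 2 set, i.e. mask 0b0111 (0x07).
	fmt.Println("bits[0:3] justified:", justification&0x07 == 0x07) // true
}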
// ProcessCrosslinks processes crosslinks and finds, for each shard, the crosslink
// with enough attesting balance to make it canonical in state.
//
// Spec pseudocode definition:
// def process_crosslinks(state: BeaconState) -> None:
//    state.previous_crosslinks = [c for c in state.current_crosslinks]
//    for epoch in (get_previous_epoch(state), get_current_epoch(state)):
//        for offset in range(get_epoch_committee_count(state, epoch)):
//            shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
//            crosslink_committee = get_crosslink_committee(state, epoch, shard)
//            winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
//            if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee):
//                state.current_crosslinks[shard] = winning_crosslink
func ProcessCrosslinks(state *pb.BeaconState) (*pb.BeaconState, error) {
	copy(state.PreviousCrosslinks, state.CurrentCrosslinks)
	epochs := []uint64{helpers.PrevEpoch(state), helpers.CurrentEpoch(state)}
	for _, e := range epochs {
		count, err := helpers.CommitteeCount(state, e)
		if err != nil {
			return nil, errors.Wrap(err, "could not get epoch committee count")
		}
		startShard, err := helpers.StartShard(state, e)
		if err != nil {
			return nil, errors.Wrap(err, "could not get epoch start shard")
		}
		for offset := uint64(0); offset < count; offset++ {
			shard := (startShard + offset) % params.BeaconConfig().ShardCount
			committee, err := helpers.CrosslinkCommittee(state, e, shard)
			if err != nil {
				return nil, errors.Wrap(err, "could not get crosslink committee")
			}
			crosslink, indices, err := WinningCrosslink(state, shard, e)
			if err != nil {
				return nil, errors.Wrap(err, "could not get winning crosslink")
			}
			attestedBalance := helpers.TotalBalance(state, indices)
			totalBalance := helpers.TotalBalance(state, committee)

			// In order for a crosslink to get included in state, the attesting balance needs to
			// be at least 2/3 of the total committee balance.
			if 3*attestedBalance >= 2*totalBalance {
				state.CurrentCrosslinks[shard] = crosslink
			}
		}
	}
	return state, nil
}
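// Illustrative standalone sketch (not Prysm code, balances assumed): the supermajority test
// above is done in integer arithmetic as 3*attested >= 2*total, so no floating-point
// division of Gwei balances is needed.
package main

import "fmt"

func main() {
	totalBalance := uint64(96000000000)    // assumed total committee balance in Gwei
	attestedBalance := uint64(64000000000) // assumed attesting balance in Gwei
	fmt.Println("crosslink adopted:", 3*attestedBalance >= 2*totalBalance) // true: exactly 2/3
}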
// ProcessRewardsAndPenalties processes the rewards and penalties of individual validators.
//
// Spec pseudocode definition:
// def process_rewards_and_penalties(state: BeaconState) -> None:
//    if get_current_epoch(state) == GENESIS_EPOCH:
//        return
//
//    rewards1, penalties1 = get_attestation_deltas(state)
//    rewards2, penalties2 = get_crosslink_deltas(state)
//    for i in range(len(state.validator_registry)):
//        increase_balance(state, i, rewards1[i] + rewards2[i])
//        decrease_balance(state, i, penalties1[i] + penalties2[i])
func ProcessRewardsAndPenalties(state *pb.BeaconState) (*pb.BeaconState, error) {
	// Can't process rewards and penalties in the genesis epoch.
	if helpers.CurrentEpoch(state) == 0 {
		return state, nil
	}
	attsRewards, attsPenalties, err := attestationDelta(state)
	if err != nil {
		return nil, errors.Wrap(err, "could not get attestation delta")
	}
	clRewards, clPenalties, err := crosslinkDelta(state)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get crosslink delta")
	}

	for i := 0; i < len(state.Validators); i++ {
		state = helpers.IncreaseBalance(state, uint64(i), attsRewards[i]+clRewards[i])
		state = helpers.DecreaseBalance(state, uint64(i), attsPenalties[i]+clPenalties[i])
	}
	return state, nil
}
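// Illustrative standalone sketch (not Prysm's helpers, values assumed): how per-validator
// reward and penalty deltas are combined and applied to balances, mirroring the loop above.
// Decreases saturate at zero, as the spec's decrease_balance does.
package main

import "fmt"

func main() {
	balances := []uint64{32000000000, 32000000000}
	rewards := []uint64{1200, 0}
	penalties := []uint64{0, 40000000000}

	for i := range balances {
		balances[i] += rewards[i]
		if penalties[i] > balances[i] {
			balances[i] = 0 // saturate instead of underflowing
		} else {
			balances[i] -= penalties[i]
		}
	}
	fmt.Println(balances) // [32000001200 0]
}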
// ProcessRegistryUpdates rotates validators in and out of the active pool.
// The number of validators rotated is determined by the churn limit.
//
// Spec pseudocode definition:
// def process_registry_updates(state: BeaconState) -> None:
//    # Process activation eligibility and ejections
//    for index, validator in enumerate(state.validator_registry):
//        if (
//            validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
//            validator.effective_balance >= MAX_EFFECTIVE_BALANCE
//        ):
//            validator.activation_eligibility_epoch = get_current_epoch(state)
//    for index, validator in enumerate(state.validators):
//        if is_eligible_for_activation_queue(validator):
//            validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
//        if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
//            initiate_validator_exit(state, index)
//            initiate_validator_exit(state, ValidatorIndex(index))
//
//    # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch
//    # Queue validators eligible for activation and not yet dequeued for activation
//    activation_queue = sorted([
//        index for index, validator in enumerate(state.validator_registry) if
//        validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
//        validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
//    ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
//    # Dequeued validators for activation up to churn limit (without resetting activation epoch)
//    for index in activation_queue[:get_churn_limit(state)]:
//        validator = state.validator_registry[index]
//        if validator.activation_epoch == FAR_FUTURE_EPOCH:
//            validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
//        index for index, validator in enumerate(state.validators)
//        if is_eligible_for_activation(state, validator)
//        # Order by the sequence of activation_eligibility_epoch setting and then index
//    ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
//    # Dequeued validators for activation up to churn limit
//    for index in activation_queue[:get_validator_churn_limit(state)]:
//        validator = state.validators[index]
//        validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
	currentEpoch := helpers.CurrentEpoch(state)

	var err error
	for idx, validator := range state.Validators {
		// Process the validators for activation eligibility.
		eligibleToActivate := validator.ActivationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch
		properBalance := validator.EffectiveBalance >= params.BeaconConfig().MaxEffectiveBalance
		if eligibleToActivate && properBalance {
			validator.ActivationEligibilityEpoch = currentEpoch
		if helpers.IsEligibleForActivationQueue(validator) {
			validator.ActivationEligibilityEpoch = helpers.CurrentEpoch(state) + 1
		}

		// Process the validators for ejection.
		isActive := helpers.IsActiveValidator(validator, currentEpoch)
		belowEjectionBalance := validator.EffectiveBalance <= params.BeaconConfig().EjectionBalance
@@ -359,22 +94,25 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
		}
	}

	// Queue the validators who are eligible to activate and sort them by activation eligibility epoch number.
	// Queue validators eligible for activation and not yet dequeued for activation.
	var activationQ []uint64
	for idx, validator := range state.Validators {
		eligibleActivated := validator.ActivationEligibilityEpoch != params.BeaconConfig().FarFutureEpoch
		canBeActive := validator.ActivationEpoch >= helpers.DelayedActivationExitEpoch(state.FinalizedCheckpoint.Epoch)
		if eligibleActivated && canBeActive {
		if helpers.IsEligibleForActivation(state, validator) {
			activationQ = append(activationQ, uint64(idx))
		}
	}
	sort.Slice(activationQ, func(i, j int) bool {
		return state.Validators[i].ActivationEligibilityEpoch < state.Validators[j].ActivationEligibilityEpoch
	})

	epochState = state
	sort.Sort(sortableIndices(activationQ))

	// Only activate just enough validators according to the activation churn limit.
	limit := len(activationQ)
	churnLimit, err := helpers.ValidatorChurnLimit(state)
	activeValidatorCount, err := helpers.ActiveValidatorCount(state, currentEpoch)
	if err != nil {
		return nil, errors.Wrap(err, "could not get active validator count")
	}

	churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
	if err != nil {
		return nil, errors.Wrap(err, "could not get churn limit")
	}
@@ -383,12 +121,12 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
	if int(churnLimit) < limit {
		limit = int(churnLimit)
	}

	for _, index := range activationQ[:limit] {
		validator := state.Validators[index]
		if validator.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
			validator.ActivationEpoch = helpers.DelayedActivationExitEpoch(currentEpoch)
		}
		validator.ActivationEpoch = helpers.DelayedActivationExitEpoch(currentEpoch)
	}

	return state, nil
}
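// Illustrative standalone sketch (not Prysm code, config constants assumed): the activation
// queue is only drained up to the churn limit, which itself scales with the number of
// active validators.
package main

import "fmt"

func main() {
	const minPerEpochChurnLimit = uint64(4)  // assumed config value
	const churnLimitQuotient = uint64(65536) // assumed config value

	activeValidatorCount := uint64(300000)
	churnLimit := activeValidatorCount / churnLimitQuotient
	if churnLimit < minPerEpochChurnLimit {
		churnLimit = minPerEpochChurnLimit
	}

	activationQueue := make([]uint64, 10) // ten validators waiting, indices elided
	limit := uint64(len(activationQueue))
	if churnLimit < limit {
		limit = churnLimit
	}
	fmt.Println("validators activated this epoch:", limit) // 4
}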
|
||||
|
||||
@@ -480,6 +218,12 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
for i, v := range state.Validators {
|
||||
if v == nil {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", i)
|
||||
}
|
||||
if i >= len(state.Balances) {
|
||||
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", i, len(state.Balances))
|
||||
}
|
||||
balance := state.Balances[i]
|
||||
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
|
||||
if balance < v.EffectiveBalance || v.EffectiveBalance+3*halfInc < balance {
|
||||
@@ -490,36 +234,19 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
}
|
||||
}
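// Illustrative standalone sketch (not Prysm code, constants assumed): the hysteresis check
// above only moves a validator's effective balance when the actual balance drifts far
// enough, which avoids rebalancing on every small change.
package main

import "fmt"

func main() {
	const effectiveBalanceIncrement = uint64(1000000000) // 1 ETH in Gwei, assumed
	halfInc := effectiveBalanceIncrement / 2

	effective := uint64(31000000000)
	for _, balance := range []uint64{31400000000, 30900000000, 32600000000} {
		update := balance < effective || effective+3*halfInc < balance
		fmt.Printf("balance=%d update=%v\n", balance, update)
	}
	// Only the second (dropped below effective) and third (rose past effective+1.5 ETH)
	// balances trigger an update.
}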
|
||||
|
||||
// Set active index root.
|
||||
// index_epoch = Epoch(next_epoch + ACTIVATION_EXIT_DELAY)
|
||||
// index_root_position = index_epoch % EPOCHS_PER_HISTORICAL_VECTOR
|
||||
// indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, index_epoch))
|
||||
// state.active_index_roots[index_root_position] = hash_tree_root(indices_list)
|
||||
activationDelay := params.BeaconConfig().ActivationExitDelay
|
||||
idxRootPosition := (nextEpoch + activationDelay) % params.BeaconConfig().EpochsPerHistoricalVector
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(state, nextEpoch+activationDelay)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
idxRoot, err := ssz.HashTreeRootWithCapacity(activeIndices, uint64(1099511627776))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not tree hash active indices")
|
||||
}
|
||||
state.ActiveIndexRoots[idxRootPosition] = idxRoot[:]
|
||||
|
||||
commRootPosition := nextEpoch % params.BeaconConfig().EpochsPerHistoricalVector
|
||||
comRoot, err := helpers.CompactCommitteesRoot(state, nextEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get compact committee root")
|
||||
}
|
||||
state.CompactCommitteesRoots[commRootPosition] = comRoot[:]
|
||||
|
||||
// Set total slashed balances.
|
||||
slashedExitLength := params.BeaconConfig().EpochsPerSlashingsVector
|
||||
state.Slashings[nextEpoch%slashedExitLength] = 0
|
||||
slashedEpoch := int(nextEpoch % slashedExitLength)
|
||||
if len(state.Slashings) != int(slashedExitLength) {
|
||||
return nil, fmt.Errorf("state slashing length %d different than EpochsPerHistoricalVector %d", len(state.Slashings), slashedExitLength)
|
||||
}
|
||||
state.Slashings[slashedEpoch] = 0
|
||||
|
||||
// Set RANDAO mix.
|
||||
randaoMixLength := params.BeaconConfig().EpochsPerHistoricalVector
|
||||
if len(state.RandaoMixes) != int(randaoMixLength) {
|
||||
return nil, fmt.Errorf("state randao length %d different than EpochsPerHistoricalVector %d", len(state.RandaoMixes), randaoMixLength)
|
||||
}
|
||||
mix := helpers.RandaoMix(state, currentEpoch)
|
||||
state.RandaoMixes[nextEpoch%randaoMixLength] = mix
|
||||
|
||||
@@ -537,13 +264,6 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
state.HistoricalRoots = append(state.HistoricalRoots, batchRoot[:])
|
||||
}
|
||||
|
||||
// Update start shard.
|
||||
delta, err := helpers.ShardDelta(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get shard delta")
|
||||
}
|
||||
state.StartShard = (state.StartShard + delta) % params.BeaconConfig().ShardCount
|
||||
|
||||
// Rotate current and previous epoch attestations.
|
||||
state.PreviousEpochAttestations = state.CurrentEpochAttestations
|
||||
state.CurrentEpochAttestations = []*pb.PendingAttestation{}
|
||||
@@ -564,8 +284,13 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestation) ([]uint64, error) {
|
||||
var setIndices []uint64
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, att := range atts {
|
||||
attestingIndices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
|
||||
committee, err := helpers.BeaconCommitteeFromState(state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attestingIndices, err := helpers.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attester indices")
|
||||
}
|
||||
@@ -591,93 +316,6 @@ func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestat
|
||||
return setIndices, nil
|
||||
}
|
||||
|
||||
// WinningCrosslink returns the crosslink with the most attesting balance for a given shard and epoch.
// It also returns the attesting indices of the winning crosslink.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_winning_crosslink_and_attesting_indices(state: BeaconState,
|
||||
// epoch: Epoch,
|
||||
// shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]:
|
||||
// attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard]
|
||||
// crosslinks = list(filter(
|
||||
// lambda c: hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)),
|
||||
// [a.data.crosslink for a in attestations]
|
||||
// ))
|
||||
// # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically)
|
||||
// winning_crosslink = max(crosslinks, key=lambda c: (
|
||||
// get_attesting_balance(state, [a for a in attestations if a.data.crosslink == c]), c.data_root
|
||||
// ), default=Crosslink())
|
||||
// winning_attestations = [a for a in attestations if a.data.crosslink == winning_crosslink]
|
||||
// return winning_crosslink, get_unslashed_attesting_indices(state, winning_attestations)
|
||||
func WinningCrosslink(state *pb.BeaconState, shard uint64, epoch uint64) (*ethpb.Crosslink, []uint64, error) {
|
||||
var shardAtts []*pb.PendingAttestation
|
||||
matchedAtts, err := MatchAttestations(state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get matching attestations")
|
||||
}
|
||||
|
||||
// Filter out source attestations by shard.
|
||||
for _, att := range matchedAtts.source {
|
||||
if att.Data.Crosslink.Shard == shard {
|
||||
shardAtts = append(shardAtts, att)
|
||||
}
|
||||
}
|
||||
var candidateCrosslinks []*ethpb.Crosslink
|
||||
// Filter out shard crosslinks with correct current or previous crosslink data.
|
||||
for _, a := range shardAtts {
|
||||
stateCrosslink := state.CurrentCrosslinks[shard]
|
||||
stateCrosslinkRoot, err := ssz.HashTreeRoot(stateCrosslink)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not hash tree root crosslink from state")
|
||||
}
|
||||
attCrosslinkRoot, err := ssz.HashTreeRoot(a.Data.Crosslink)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not hash tree root crosslink from attestation")
|
||||
}
|
||||
currCrosslinkMatches := bytes.Equal(stateCrosslinkRoot[:], attCrosslinkRoot[:])
|
||||
prevCrosslinkMatches := bytes.Equal(stateCrosslinkRoot[:], a.Data.Crosslink.ParentRoot)
|
||||
if currCrosslinkMatches || prevCrosslinkMatches {
|
||||
candidateCrosslinks = append(candidateCrosslinks, a.Data.Crosslink)
|
||||
}
|
||||
}
|
||||
|
||||
if len(candidateCrosslinks) == 0 {
|
||||
return ðpb.Crosslink{
|
||||
DataRoot: params.BeaconConfig().ZeroHash[:],
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
}, nil, nil
|
||||
}
|
||||
var crosslinkAtts []*pb.PendingAttestation
|
||||
var winnerBalance uint64
|
||||
var winnerCrosslink *ethpb.Crosslink
|
||||
// Out of the existing shard crosslinks, pick the one that has the
|
||||
// most balance staked.
|
||||
crosslinkAtts = attsForCrosslink(candidateCrosslinks[0], shardAtts)
|
||||
winnerBalance, err = AttestingBalance(state, crosslinkAtts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
winnerCrosslink = candidateCrosslinks[0]
|
||||
for _, c := range candidateCrosslinks {
|
||||
crosslinkAtts = attsForCrosslink(c, shardAtts)
|
||||
attestingBalance, err := AttestingBalance(state, crosslinkAtts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get crosslink's attesting balance")
|
||||
}
|
||||
if attestingBalance > winnerBalance {
|
||||
winnerCrosslink = c
|
||||
}
|
||||
}
|
||||
|
||||
crosslinkIndices, err := unslashedAttestingIndices(state, attsForCrosslink(winnerCrosslink, shardAtts))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get crosslink indices")
|
||||
}
|
||||
|
||||
return winnerCrosslink, crosslinkIndices, nil
|
||||
}
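// Illustrative standalone sketch (not Prysm code, data assumed): out of the candidate
// crosslinks, the one backed by the largest attesting balance wins, which is what the loop
// above computes. The spec additionally breaks ties lexicographically by data root; that
// detail is omitted here.
package main

import "fmt"

type candidate struct {
	name    string
	balance uint64 // attesting balance backing this crosslink, assumed
}

func main() {
	candidates := []candidate{{"A", 900}, {"B", 1500}, {"C", 1200}}
	winner := candidates[0]
	for _, c := range candidates[1:] {
		if c.balance > winner.balance {
			winner = c
		}
	}
	fmt.Println("winning crosslink:", winner.name) // B
}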
|
||||
|
||||
// BaseReward takes the state and a validator index and calculates
// that validator's base reward.
|
||||
//
|
||||
@@ -699,271 +337,3 @@ func BaseReward(state *pb.BeaconState, index uint64) (uint64, error) {
|
||||
mathutil.IntegerSquareRoot(totalBalance) / params.BeaconConfig().BaseRewardsPerEpoch
|
||||
return baseReward, nil
|
||||
}
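// Illustrative standalone sketch (not Prysm code, config constants assumed): the base reward
// scales with the validator's effective balance and shrinks as the square root of the total
// active balance grows, matching the quotient computed above.
package main

import (
	"fmt"
	"math"
)

func main() {
	const baseRewardFactor = uint64(64)   // assumed config value
	const baseRewardsPerEpoch = uint64(4) // assumed config value

	effectiveBalance := uint64(32000000000)              // 32 ETH in Gwei
	totalActiveBalance := uint64(10000000000000000)      // 10M ETH in Gwei, a perfect square
	sqrtTotal := uint64(math.Sqrt(float64(totalActiveBalance))) // exact here: 100,000,000

	baseReward := effectiveBalance * baseRewardFactor / sqrtTotal / baseRewardsPerEpoch
	fmt.Println("base reward (Gwei):", baseReward) // 5120
}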
|
||||
|
||||
// attestationDelta calculates the rewards and penalties of an individual
// validator for voting the correct FFG source, FFG target, and head. It
// also calculates proposer rewards, inclusion delay rewards, and inactivity
// penalties. Individual rewards and penalties are returned as lists.
//
// Note: we calculate the adjusted quotient outside of BaseReward because it's too inefficient
// to repeat the same calculation for every validator versus just doing it once.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||
// previous_epoch = get_previous_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// rewards = [Gwei(0) for _ in range(len(state.validators))]
|
||||
// penalties = [Gwei(0) for _ in range(len(state.validators))]
|
||||
// eligible_validator_indices = [
|
||||
// ValidatorIndex(index) for index, v in enumerate(state.validators)
|
||||
// if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
|
||||
// ]
|
||||
//
|
||||
// # Micro-incentives for matching FFG source, FFG target, and head
|
||||
// matching_source_attestations = get_matching_source_attestations(state, previous_epoch)
|
||||
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch)
|
||||
// matching_head_attestations = get_matching_head_attestations(state, previous_epoch)
|
||||
// for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations):
|
||||
// unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
|
||||
// attesting_balance = get_total_balance(state, unslashed_attesting_indices)
|
||||
// for index in eligible_validator_indices:
|
||||
// if index in unslashed_attesting_indices:
|
||||
// rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance
|
||||
// else:
|
||||
// penalties[index] += get_base_reward(state, index)
|
||||
//
|
||||
// # Proposer and inclusion delay micro-rewards
|
||||
// for index in get_unslashed_attesting_indices(state, matching_source_attestations):
|
||||
// index = ValidatorIndex(index)
|
||||
// attestation = min([
|
||||
// a for a in matching_source_attestations
|
||||
// if index in get_attesting_indices(state, a.data, a.aggregation_bits)
|
||||
// ], key=lambda a: a.inclusion_delay)
|
||||
// proposer_reward = Gwei(get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT)
|
||||
// rewards[attestation.proposer_index] += proposer_reward
|
||||
// max_attester_reward = get_base_reward(state, index) - proposer_reward
|
||||
// rewards[index] += Gwei(
|
||||
// max_attester_reward
|
||||
// * (SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY - attestation.inclusion_delay)
|
||||
// // SLOTS_PER_EPOCH
|
||||
// )
|
||||
//
|
||||
// # Inactivity penalty
|
||||
// finality_delay = previous_epoch - state.finalized_checkpoint.epoch
|
||||
// if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
|
||||
// matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
|
||||
// for index in eligible_validator_indices:
|
||||
// index = ValidatorIndex(index)
|
||||
// penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * get_base_reward(state, index))
|
||||
// if index not in matching_target_attesting_indices:
|
||||
// penalties[index] += Gwei(
|
||||
// state.validators[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
|
||||
// )
|
||||
//
|
||||
// return rewards, penalties
|
||||
func attestationDelta(state *pb.BeaconState) ([]uint64, []uint64, error) {
|
||||
prevEpoch := helpers.PrevEpoch(state)
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get total active balance")
|
||||
}
|
||||
|
||||
rewards := make([]uint64, len(state.Validators))
|
||||
penalties := make([]uint64, len(state.Validators))
|
||||
|
||||
// Filter out the list of eligible validator indices. The eligible validator
|
||||
// has to be active or slashed but before withdrawn.
|
||||
var eligible []uint64
|
||||
for i, v := range state.Validators {
|
||||
isActive := helpers.IsActiveValidator(v, prevEpoch)
|
||||
isSlashed := v.Slashed && (prevEpoch+1 < v.WithdrawableEpoch)
|
||||
if isActive || isSlashed {
|
||||
eligible = append(eligible, uint64(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Apply rewards and penalties for voting the correct source, target and head.
// Construct an attestations list containing source, target and head attestations.
|
||||
atts, err := MatchAttestations(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get source, target and head attestations")
|
||||
}
|
||||
var attsPackage [][]*pb.PendingAttestation
|
||||
attsPackage = append(attsPackage, atts.source)
|
||||
attsPackage = append(attsPackage, atts.Target)
|
||||
attsPackage = append(attsPackage, atts.head)
|
||||
|
||||
// Cache the validators who voted correctly for source in a map
|
||||
// to calculate earliest attestation rewards later.
|
||||
attestersVotedSource := make(map[uint64]*pb.PendingAttestation)
|
||||
// Compute rewards / penalties for each attestation in the list and update
|
||||
// the rewards and penalties lists.
|
||||
for i, matchAtt := range attsPackage {
|
||||
indices, err := unslashedAttestingIndices(state, matchAtt)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attestation indices")
|
||||
}
|
||||
|
||||
attested := make(map[uint64]bool)
|
||||
// Construct a map to look up validators that voted for source, target or head.
|
||||
for _, index := range indices {
|
||||
if i == 0 {
|
||||
attestersVotedSource[index] = &pb.PendingAttestation{InclusionDelay: params.BeaconConfig().FarFutureEpoch}
|
||||
}
|
||||
attested[index] = true
|
||||
}
|
||||
attestedBalance := helpers.TotalBalance(state, indices)
|
||||
|
||||
// Update rewards and penalties to each eligible validator index.
|
||||
for _, index := range eligible {
|
||||
base, err := BaseReward(state, index)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get base reward")
|
||||
}
|
||||
if _, ok := attested[index]; ok {
|
||||
rewards[index] += base * attestedBalance / totalBalance
|
||||
} else {
|
||||
penalties[index] += base
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For every index, filter the matching source attestations that correspond to the index,
// sort by inclusion delay, and keep the one that was included on chain first.
|
||||
for _, att := range atts.source {
|
||||
indices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attester indices")
|
||||
}
|
||||
for _, i := range indices {
|
||||
if _, ok := attestersVotedSource[i]; ok {
|
||||
if attestersVotedSource[i].InclusionDelay > att.InclusionDelay {
|
||||
attestersVotedSource[i] = att
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, a := range attestersVotedSource {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
baseReward, err := BaseReward(state, i)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get proposer reward")
|
||||
}
|
||||
proposerReward := baseReward / params.BeaconConfig().ProposerRewardQuotient
|
||||
rewards[a.ProposerIndex] += proposerReward
|
||||
attesterReward := baseReward - proposerReward
|
||||
rewards[i] += attesterReward * (slotsPerEpoch + params.BeaconConfig().MinAttestationInclusionDelay - a.InclusionDelay) / slotsPerEpoch
|
||||
}
|
||||
|
||||
// Apply penalties for quadratic leaks.
|
||||
// When epoch since finality exceeds inactivity penalty constant, the penalty gets increased
|
||||
// based on the finality delay.
|
||||
finalityDelay := prevEpoch - state.FinalizedCheckpoint.Epoch
|
||||
if finalityDelay > params.BeaconConfig().MinEpochsToInactivityPenalty {
|
||||
targetIndices, err := unslashedAttestingIndices(state, atts.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attestation indices")
|
||||
}
|
||||
attestedTarget := make(map[uint64]bool)
|
||||
for _, index := range targetIndices {
|
||||
attestedTarget[index] = true
|
||||
}
|
||||
for _, index := range eligible {
|
||||
base, err := BaseReward(state, index)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get base reward")
|
||||
}
|
||||
penalties[index] += params.BeaconConfig().BaseRewardsPerEpoch * base
|
||||
if _, ok := attestedTarget[index]; !ok {
|
||||
penalties[index] += state.Validators[index].EffectiveBalance * finalityDelay /
|
||||
params.BeaconConfig().InactivityPenaltyQuotient
|
||||
}
|
||||
}
|
||||
}
|
||||
return rewards, penalties, nil
|
||||
}
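// Illustrative standalone sketch (not Prysm code, values assumed): an attester who voted
// correctly is rewarded in proportion to how much of the total balance attested with them,
// while a non-attester is penalised the full base reward, as in the loop above.
package main

import "fmt"

func main() {
	baseReward := uint64(5120)      // assumed, see the base reward sketch above
	attestingBalance := uint64(900) // assumed units
	totalBalance := uint64(1000)    // assumed units

	reward := baseReward * attestingBalance / totalBalance
	penalty := baseReward
	fmt.Println("attester reward:", reward)       // 4608
	fmt.Println("non-attester penalty:", penalty) // 5120
}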
|
||||
|
||||
// crosslinkDelta calculates the rewards and penalties of an individual
// validator for submitting the correct crosslink.
// Individual rewards and penalties are returned as lists.
//
// Note: we calculate the adjusted quotient outside of BaseReward because it's too inefficient
// to repeat the same calculation for every validator versus just doing it once.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
|
||||
// rewards = [0 for index in range(len(state.validator_registry))]
|
||||
// penalties = [0 for index in range(len(state.validator_registry))]
|
||||
// epoch = get_previous_epoch(state)
|
||||
// for offset in range(get_epoch_committee_count(state, epoch)):
|
||||
// shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
|
||||
// crosslink_committee = get_crosslink_committee(state, epoch, shard)
|
||||
// winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
|
||||
// attesting_balance = get_total_balance(state, attesting_indices)
|
||||
// committee_balance = get_total_balance(state, crosslink_committee)
|
||||
// for index in crosslink_committee:
|
||||
// base_reward = get_base_reward(state, index)
|
||||
// if index in attesting_indices:
|
||||
// rewards[index] += base_reward * attesting_balance // committee_balance
|
||||
// else:
|
||||
// penalties[index] += base_reward
|
||||
// return rewards, penalties
|
||||
func crosslinkDelta(state *pb.BeaconState) ([]uint64, []uint64, error) {
|
||||
rewards := make([]uint64, len(state.Validators))
|
||||
penalties := make([]uint64, len(state.Validators))
|
||||
epoch := helpers.PrevEpoch(state)
|
||||
count, err := helpers.CommitteeCount(state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get epoch committee count")
|
||||
}
|
||||
startShard, err := helpers.StartShard(state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get epoch start shard")
|
||||
}
|
||||
for i := uint64(0); i < count; i++ {
|
||||
shard := (startShard + i) % params.BeaconConfig().ShardCount
|
||||
committee, err := helpers.CrosslinkCommittee(state, epoch, shard)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get crosslink's committee")
|
||||
}
|
||||
_, attestingIndices, err := WinningCrosslink(state, shard, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get winning crosslink")
|
||||
}
|
||||
|
||||
attested := make(map[uint64]bool)
|
||||
// Construct a map to look up validators that voted for crosslink.
|
||||
for _, index := range attestingIndices {
|
||||
attested[index] = true
|
||||
}
|
||||
committeeBalance := helpers.TotalBalance(state, committee)
|
||||
attestingBalance := helpers.TotalBalance(state, attestingIndices)
|
||||
|
||||
for _, index := range committee {
|
||||
base, err := BaseReward(state, index)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get base reward")
|
||||
}
|
||||
if _, ok := attested[index]; ok {
|
||||
rewards[index] += base * attestingBalance / committeeBalance
|
||||
} else {
|
||||
penalties[index] += base
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rewards, penalties, nil
|
||||
}
|
||||
|
||||
// attsForCrosslink returns the attestations of the input crosslink.
|
||||
func attsForCrosslink(crosslink *ethpb.Crosslink, atts []*pb.PendingAttestation) []*pb.PendingAttestation {
|
||||
var crosslinkAtts []*pb.PendingAttestation
|
||||
for _, a := range atts {
|
||||
if proto.Equal(a.Data.Crosslink, crosslink) {
|
||||
crosslinkAtts = append(crosslinkAtts, a)
|
||||
}
|
||||
}
|
||||
return crosslinkAtts
|
||||
}
|
||||
|
||||
beacon-chain/core/epoch/epoch_processing_fuzz_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestFuzzFinalUpdates_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
_, _ = ProcessFinalUpdates(state)
|
||||
}
|
||||
}
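// Illustrative standalone sketch (hypothetical struct, not Prysm code): the same gofuzz
// pattern used in the test above, seeding the fuzzer so runs are reproducible and filling
// a struct with pseudo-random values.
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type toyState struct {
	Slot     uint64
	Balances []uint64
}

func main() {
	fuzzer := fuzz.NewWithSeed(42) // fixed seed: the generated values repeat across runs
	s := &toyState{}
	fuzzer.Fuzz(s)
	fmt.Printf("fuzzed state: %+v\n", s)
}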
|
||||
File diff suppressed because it is too large
@@ -1,31 +0,0 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
// ComputeValidatorParticipation by matching validator attestations during the epoch,
|
||||
// computing the attesting balance, and how much attested compared to the total balances.
|
||||
func ComputeValidatorParticipation(state *pb.BeaconState) (*ethpb.ValidatorParticipation, error) {
|
||||
currentEpoch := helpers.SlotToEpoch(state.Slot)
|
||||
atts, err := MatchAttestations(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve head attestations")
|
||||
}
|
||||
attestedBalances, err := AttestingBalance(state, atts.Target)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve attested balances")
|
||||
}
|
||||
totalBalances, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve total balances")
|
||||
}
|
||||
return ðpb.ValidatorParticipation{
|
||||
GlobalParticipationRate: float32(attestedBalances) / float32(totalBalances),
|
||||
VotedEther: attestedBalances,
|
||||
EligibleEther: totalBalances,
|
||||
}, nil
|
||||
}
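// Illustrative standalone sketch (not Prysm code, balances assumed): the participation rate
// reported above is simply the attested (voted) balance over the total eligible balance.
package main

import "fmt"

func main() {
	votedEther := uint64(2400000000000000)    // assumed attested balance in Gwei
	eligibleEther := uint64(3200000000000000) // assumed total active balance in Gwei

	rate := float32(votedEther) / float32(eligibleEther)
	fmt.Println("global participation rate:", rate) // 0.75
}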
|
||||
@@ -1,69 +0,0 @@
|
||||
package epoch_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestComputeValidatorParticipation(t *testing.T) {
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
e := uint64(1)
|
||||
attestedBalance := uint64(1)
|
||||
validatorCount := uint64(100)
|
||||
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
atts := []*pb.PendingAttestation{{Data: ðpb.AttestationData{Crosslink: ðpb.Crosslink{Shard: 0}, Target: ðpb.Checkpoint{}}}}
|
||||
var crosslinks []*ethpb.Crosslink
|
||||
for i := uint64(0); i < params.BeaconConfig().ShardCount; i++ {
|
||||
crosslinks = append(crosslinks, ðpb.Crosslink{
|
||||
StartEpoch: 0,
|
||||
DataRoot: []byte{'A'},
|
||||
})
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{
|
||||
Slot: e*params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
BlockRoots: make([][]byte, 128),
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
ActiveIndexRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CompactCommitteesRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentCrosslinks: crosslinks,
|
||||
CurrentEpochAttestations: atts,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
|
||||
res, err := epoch.ComputeValidatorParticipation(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := ðpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
|
||||
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(res, wanted) {
|
||||
t.Errorf("Incorrect validator participation, wanted %v received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
@@ -13,14 +13,13 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -38,12 +37,11 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -11,6 +11,10 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Balances stores balances such as prev/current total validator balances, attested balances and more.
|
||||
// It's used for metrics reporting.
|
||||
var Balances *Balance
|
||||
|
||||
// ProcessAttestations process the attestations in state and update individual validator's pre computes,
|
||||
// it also tracks and updates epoch attesting balances.
|
||||
func ProcessAttestations(
|
||||
@@ -23,6 +27,7 @@ func ProcessAttestations(
|
||||
|
||||
v := &Validator{}
|
||||
var err error
|
||||
|
||||
for _, a := range append(state.PreviousEpochAttestations, state.CurrentEpochAttestations...) {
|
||||
v.IsCurrentEpochAttester, v.IsCurrentEpochTargetAttester, err = AttestedCurrentEpoch(state, a)
|
||||
if err != nil {
|
||||
@@ -35,21 +40,19 @@ func ProcessAttestations(
|
||||
return nil, nil, errors.Wrap(err, "could not check validator attested previous epoch")
|
||||
}
|
||||
|
||||
// Get attested indices and update the pre computed fields for each attested validators.
|
||||
indices, err := helpers.AttestingIndices(state, a.Data, a.AggregationBits)
|
||||
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Get attestation slot to find lowest inclusion delayed attestation for each attested validators.
|
||||
aSlot, err := helpers.AttestationDataSlot(state, a.Data)
|
||||
indices, err := helpers.AttestingIndices(a.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
||||
}
|
||||
vp = UpdateValidator(vp, v, indices, a, aSlot)
|
||||
vp = UpdateValidator(vp, v, indices, a, a.Data.Slot)
|
||||
}
|
||||
|
||||
bp = UpdateBalance(vp, bp)
|
||||
Balances = bp
|
||||
|
||||
return vp, bp, nil
|
||||
}
|
||||
@@ -112,11 +115,7 @@ func SameTarget(state *pb.BeaconState, a *pb.PendingAttestation, e uint64) (bool
|
||||
|
||||
// SameHead returns true if attestation `a` attested to the same block by attestation slot in state.
|
||||
func SameHead(state *pb.BeaconState, a *pb.PendingAttestation) (bool, error) {
|
||||
aSlot, err := helpers.AttestationDataSlot(state, a.Data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
r, err := helpers.BlockRootAtSlot(state, aSlot)
|
||||
r, err := helpers.BlockRootAtSlot(state, a.Data.Slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -139,6 +138,12 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *pb
|
||||
}
|
||||
if record.IsPrevEpochAttester {
|
||||
vp[i].IsPrevEpochAttester = true
|
||||
// Update attestation inclusion info if inclusion slot is lower than before
|
||||
if inclusionSlot < vp[i].InclusionSlot {
|
||||
vp[i].InclusionSlot = aSlot + a.InclusionDelay
|
||||
vp[i].InclusionDistance = a.InclusionDelay
|
||||
vp[i].ProposerIndex = a.ProposerIndex
|
||||
}
|
||||
}
|
||||
if record.IsPrevEpochTargetAttester {
|
||||
vp[i].IsPrevEpochTargetAttester = true
|
||||
@@ -146,13 +151,6 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *pb
|
||||
if record.IsPrevEpochHeadAttester {
|
||||
vp[i].IsPrevEpochHeadAttester = true
|
||||
}
|
||||
|
||||
// Update attestation inclusion info if inclusion slot is lower than before
|
||||
if inclusionSlot < vp[i].InclusionSlot {
|
||||
vp[i].InclusionSlot = aSlot + a.InclusionDelay
|
||||
vp[i].InclusionDistance = a.InclusionDelay
|
||||
vp[i].ProposerIndex = a.ProposerIndex
|
||||
}
|
||||
}
|
||||
return vp
|
||||
}
|
||||
|
||||
@@ -5,16 +5,15 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestUpdateValidator(t *testing.T) {
|
||||
func TestUpdateValidator_Works(t *testing.T) {
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
vp := []*precompute.Validator{{}, {InclusionSlot: e}, {}, {InclusionSlot: e}, {}, {InclusionSlot: e}}
|
||||
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
|
||||
@@ -33,6 +32,21 @@ func TestUpdateValidator(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateValidator_InclusionOnlyCountsPrevEpoch(t *testing.T) {
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
vp := []*precompute.Validator{{InclusionSlot: e}}
|
||||
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true}
|
||||
a := &pb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}
|
||||
|
||||
// Verify inclusion info doesnt get updated.
|
||||
vp = precompute.UpdateValidator(vp, record, []uint64{0}, a, 100)
|
||||
wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
|
||||
wantedVp := []*precompute.Validator{wanted}
|
||||
if !reflect.DeepEqual(vp, wantedVp) {
|
||||
t.Error("Incorrect attesting validator calculations")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateBalance(t *testing.T) {
|
||||
vp := []*precompute.Validator{
|
||||
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100},
|
||||
@@ -58,22 +72,12 @@ func TestUpdateBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSameHead(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
beaconState.Slot = 1
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0},
|
||||
Crosslink: ðpb.Crosslink{Shard: 0}}}
|
||||
attSlot, err := helpers.AttestationDataSlot(beaconState, att.Data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}}
|
||||
r := []byte{'A'}
|
||||
beaconState.BlockRoots[attSlot] = r
|
||||
beaconState.BlockRoots[0] = r
|
||||
att.Data.BeaconBlockRoot = r
|
||||
same, err := precompute.SameHead(beaconState, &pb.PendingAttestation{Data: att.Data})
|
||||
if err != nil {
|
||||
@@ -93,21 +97,12 @@ func TestSameHead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSameTarget(t *testing.T) {
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
beaconState.Slot = 1
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0},
|
||||
Crosslink: ðpb.Crosslink{Shard: 0}}}
|
||||
attSlot, err := helpers.AttestationDataSlot(beaconState, att.Data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}}
|
||||
r := []byte{'A'}
|
||||
beaconState.BlockRoots[attSlot] = r
|
||||
beaconState.BlockRoots[0] = r
|
||||
att.Data.Target.Root = r
|
||||
same, err := precompute.SameTarget(beaconState, &pb.PendingAttestation{Data: att.Data}, 0)
|
||||
if err != nil {
|
||||
@@ -127,21 +122,12 @@ func TestSameTarget(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAttestedPrevEpoch(t *testing.T) {
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
beaconState.Slot = params.BeaconConfig().SlotsPerEpoch
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0},
|
||||
Crosslink: ðpb.Crosslink{Shard: 960}}}
|
||||
attSlot, err := helpers.AttestationDataSlot(beaconState, att.Data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}}
|
||||
r := []byte{'A'}
|
||||
beaconState.BlockRoots[attSlot] = r
|
||||
beaconState.BlockRoots[0] = r
|
||||
att.Data.Target.Root = r
|
||||
att.Data.BeaconBlockRoot = r
|
||||
votedEpoch, votedTarget, votedHead, err := precompute.AttestedPrevEpoch(beaconState, &pb.PendingAttestation{Data: att.Data})
|
||||
@@ -160,21 +146,12 @@ func TestAttestedPrevEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAttestedCurrentEpoch(t *testing.T) {
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
beaconState.Slot = params.BeaconConfig().SlotsPerEpoch + 1
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1},
|
||||
Crosslink: ðpb.Crosslink{}}}
|
||||
attSlot, err := helpers.AttestationDataSlot(beaconState, att.Data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Target: ðpb.Checkpoint{Epoch: 1}}}
|
||||
r := []byte{'A'}
|
||||
beaconState.BlockRoots[attSlot] = r
|
||||
beaconState.BlockRoots[params.BeaconConfig().SlotsPerEpoch] = r
|
||||
att.Data.Target.Root = r
|
||||
att.Data.BeaconBlockRoot = r
|
||||
votedEpoch, votedTarget, err := precompute.AttestedCurrentEpoch(beaconState, &pb.PendingAttestation{Data: att.Data})
|
||||
@@ -190,26 +167,20 @@ func TestAttestedCurrentEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
validators := uint64(64)
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, validators)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, uint64(0), ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, validators)
|
||||
beaconState.Slot = params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
bf := []byte{0xff}
|
||||
att1 := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0},
|
||||
Crosslink: ðpb.Crosslink{Shard: 960}}, AggregationBits: bf}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}},
|
||||
AggregationBits: bf}
|
||||
att2 := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0},
|
||||
Crosslink: ðpb.Crosslink{Shard: 961}}, AggregationBits: bf}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}},
|
||||
AggregationBits: bf}
|
||||
beaconState.BlockRoots[0] = []byte{'A'}
|
||||
att1.Data.Target.Root = []byte{'A'}
|
||||
att1.Data.BeaconBlockRoot = []byte{'A'}
|
||||
@@ -224,17 +195,26 @@ func TestProcessAttestations(t *testing.T) {
|
||||
vp[i] = &precompute.Validator{CurrentEpochEffectiveBalance: 100}
|
||||
}
|
||||
bp := &precompute.Balance{}
|
||||
vp, bp, err = precompute.ProcessAttestations(context.Background(), beaconState, vp, bp)
|
||||
vp, bp, err := precompute.ProcessAttestations(context.Background(), beaconState, vp, bp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
indices, _ := helpers.AttestingIndices(beaconState, att1.Data, att1.AggregationBits)
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
indices, _ := helpers.AttestingIndices(att1.AggregationBits, committee)
|
||||
for _, i := range indices {
|
||||
if !vp[i].IsPrevEpochAttester {
|
||||
t.Error("Not a prev epoch attester")
|
||||
}
|
||||
}
|
||||
indices, _ = helpers.AttestingIndices(beaconState, att2.Data, att2.AggregationBits)
|
||||
committee, err = helpers.BeaconCommitteeFromState(beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
indices, _ = helpers.AttestingIndices(att2.AggregationBits, committee)
|
||||
for _, i := range indices {
|
||||
if !vp[i].IsPrevEpochAttester {
|
||||
t.Error("Not a prev epoch attester")
|
||||
|
||||
@@ -2,9 +2,9 @@ package precompute
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
|
||||
|
||||
@@ -4,10 +4,10 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
@@ -40,9 +40,9 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(128)}) {
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(64)}) {
|
||||
t.Errorf("Wanted current justified root: %v, got: %v",
|
||||
[]byte{byte(128)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
[]byte{byte(64)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
}
|
||||
if newState.CurrentJustifiedCheckpoint.Epoch != 2 {
|
||||
t.Errorf("Wanted justified epoch: %d, got: %d",
|
||||
@@ -90,9 +90,9 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(128)}) {
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(64)}) {
|
||||
t.Errorf("Wanted current justified root: %v, got: %v",
|
||||
[]byte{byte(128)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
[]byte{byte(64)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
}
|
||||
if newState.CurrentJustifiedCheckpoint.Epoch != 2 {
|
||||
t.Errorf("Wanted justified epoch: %d, got: %d",
|
||||
@@ -139,9 +139,9 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(128)}) {
|
||||
if !bytes.Equal(newState.CurrentJustifiedCheckpoint.Root, []byte{byte(64)}) {
|
||||
t.Errorf("Wanted current justified root: %v, got: %v",
|
||||
[]byte{byte(128)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
[]byte{byte(64)}, newState.CurrentJustifiedCheckpoint.Root)
|
||||
}
|
||||
if newState.PreviousJustifiedCheckpoint.Epoch != 0 {
|
||||
t.Errorf("Wanted previous justified epoch: %d, got: %d",
|
||||
|
||||
@@ -5,9 +5,9 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package precompute

import (
	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/mathutil"
@@ -30,14 +29,9 @@ func ProcessRewardsAndPenaltiesPrecompute(state *pb.BeaconState, bp *Balance, vp
	if err != nil {
		return nil, errors.Wrap(err, "could not get attestation delta")
	}
-	clRewards, clPenalties, err := crosslinkDeltaPreCompute(state, bp, vp)
-	if err != nil {
-		return nil, errors.Wrapf(err, "could not get crosslink delta")
-	}

	for i := 0; i < len(state.Validators); i++ {
-		state = helpers.IncreaseBalance(state, uint64(i), attsRewards[i]+clRewards[i]+proposerRewards[i])
-		state = helpers.DecreaseBalance(state, uint64(i), attsPenalties[i]+clPenalties[i])
+		state = helpers.IncreaseBalance(state, uint64(i), attsRewards[i]+proposerRewards[i])
+		state = helpers.DecreaseBalance(state, uint64(i), attsPenalties[i])
	}
	return state, nil
}
@@ -70,8 +64,7 @@ func attestationDelta(state *pb.BeaconState, bp *Balance, v *Validator) (uint64,
		r += br * bp.PrevEpochAttesters / bp.CurrentEpoch
		proposerReward := br / params.BeaconConfig().ProposerRewardQuotient
		maxAtteserReward := br - proposerReward
-		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
-		r += maxAtteserReward * (slotsPerEpoch + params.BeaconConfig().MinAttestationInclusionDelay - v.InclusionDistance) / slotsPerEpoch
+		r += maxAtteserReward / v.InclusionDistance
	} else {
		p += br
	}
@@ -83,7 +76,7 @@ func attestationDelta(state *pb.BeaconState, bp *Balance, v *Validator) (uint64,
		p += br
	}

-	// Process heard reward / penalty
+	// Process head reward / penalty
	if v.IsPrevEpochHeadAttester && !v.IsSlashed {
		r += br * bp.PrevEpochHeadAttesters / bp.CurrentEpoch
	} else {
@@ -118,48 +111,3 @@ func proposerDeltaPrecompute(state *pb.BeaconState, bp *Balance, vp []*Validator
	}
	return rewards, nil
}
-
-// This computes the rewards and penalties differences for individual validators based on the
-// crosslink records.
-func crosslinkDeltaPreCompute(state *pb.BeaconState, bp *Balance, vp []*Validator) ([]uint64, []uint64, error) {
-	rewards := make([]uint64, len(state.Validators))
-	penalties := make([]uint64, len(state.Validators))
-	prevEpoch := helpers.PrevEpoch(state)
-	count, err := helpers.CommitteeCount(state, prevEpoch)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "could not get epoch committee count")
-	}
-	startShard, err := helpers.StartShard(state, prevEpoch)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "could not get epoch start shard")
-	}
-	for i := uint64(0); i < count; i++ {
-		shard := (startShard + i) % params.BeaconConfig().ShardCount
-		committee, err := helpers.CrosslinkCommittee(state, prevEpoch, shard)
-		if err != nil {
-			return nil, nil, errors.Wrap(err, "could not get crosslink's committee")
-		}
-		_, attestingIndices, err := epoch.WinningCrosslink(state, shard, prevEpoch)
-		if err != nil {
-			return nil, nil, errors.Wrap(err, "could not get winning crosslink")
-		}
-
-		attested := make(map[uint64]bool)
-		// Construct a map to look up validators that voted for crosslink.
-		for _, index := range attestingIndices {
-			attested[index] = true
-		}
-		committeeBalance := helpers.TotalBalance(state, committee)
-		attestingBalance := helpers.TotalBalance(state, attestingIndices)
-
-		for _, index := range committee {
-			base := vp[i].CurrentEpochEffectiveBalance * params.BeaconConfig().BaseRewardFactor / mathutil.IntegerSquareRoot(bp.CurrentEpoch) / params.BeaconConfig().BaseRewardsPerEpoch
-			if _, ok := attested[index]; ok {
-				rewards[index] += base * attestingBalance / committeeBalance
-			} else {
-				penalties[index] += base
-			}
-		}
-	}
-	return rewards, penalties, nil
-}
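Editor's note: the hunks above remove the crosslink component from ProcessRewardsAndPenaltiesPrecompute and replace attestationDelta's slot-scaled inclusion term with a 1/inclusion-distance term. The standalone sketch below is not part of this diff; it only contrasts the two formulas for an arbitrary base reward, and the constants it assumes (32 slots per epoch, minimum inclusion delay of 1, proposer reward quotient of 8) are the usual mainnet parameters rather than values taken from this change.

package main

import "fmt"

// Illustration only: compares the removed slot-scaled inclusion reward with the
// 1/inclusion-distance reward introduced in the attestationDelta hunk above.
// The constants below are assumed mainnet parameters, not read from this diff.
func main() {
	const (
		baseReward        = uint64(12649) // arbitrary example value
		slotsPerEpoch     = uint64(32)
		minInclusionDelay = uint64(1)
		proposerQuotient  = uint64(8)
	)
	proposerReward := baseReward / proposerQuotient
	maxAttesterReward := baseReward - proposerReward

	for _, dist := range []uint64{1, 2, 4, 32} {
		oldReward := maxAttesterReward * (slotsPerEpoch + minInclusionDelay - dist) / slotsPerEpoch
		newReward := maxAttesterReward / dist
		fmt.Printf("inclusion distance %2d: old reward %d, new reward %d\n", dist, oldReward, newReward)
	}
}

Both forms pay the full attester reward when an attestation is included after one slot; the new form simply decays it as 1/distance instead of linearly over the epoch.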
@@ -4,28 +4,22 @@ import (
	"context"
	"testing"

+	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
-	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
)

func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
	helpers.ClearAllCaches()
	e := params.BeaconConfig().SlotsPerEpoch
	validatorCount := uint64(2048)
	state := buildState(e+3, validatorCount)
	startShard := uint64(960)
	atts := make([]*pb.PendingAttestation, 3)
	for i := 0; i < len(atts); i++ {
		atts[i] = &pb.PendingAttestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					Shard:    startShard + uint64(i),
					DataRoot: []byte{'A'},
				},
				Target: &ethpb.Checkpoint{},
				Source: &ethpb.Checkpoint{},
			},
@@ -34,15 +28,6 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
		}
	}
	state.PreviousEpochAttestations = atts
-	state.CurrentCrosslinks[startShard] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'},
-	}
-	state.CurrentCrosslinks[startShard+1] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'},
-	}
-	state.CurrentCrosslinks[startShard+2] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'},
-	}

	vp, bp := New(context.Background(), state)
	vp, bp, err := ProcessAttestations(context.Background(), state, vp, bp)
@@ -56,14 +41,14 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
	}

	// Indices that voted everything except for head, lost a bit money
-	wanted := uint64(31999995452)
+	wanted := uint64(31999810265)
	if state.Balances[4] != wanted {
		t.Errorf("wanted balance: %d, got: %d",
			wanted, state.Balances[4])
	}

	// Indices that did not vote, lost more money
-	wanted = uint64(31999949392)
+	wanted = uint64(31999873505)
	if state.Balances[0] != wanted {
		t.Errorf("wanted balance: %d, got: %d",
			wanted, state.Balances[0])
@@ -71,19 +56,13 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
}

func TestAttestationDeltaPrecompute(t *testing.T) {
	helpers.ClearAllCaches()
	e := params.BeaconConfig().SlotsPerEpoch
	validatorCount := uint64(2048)
	state := buildState(e+2, validatorCount)
	startShard := uint64(960)
	atts := make([]*pb.PendingAttestation, 3)
	for i := 0; i < len(atts); i++ {
		atts[i] = &pb.PendingAttestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					Shard:    startShard + uint64(i),
					DataRoot: []byte{'A'},
				},
				Target: &ethpb.Checkpoint{},
				Source: &ethpb.Checkpoint{},
			},
@@ -92,12 +71,6 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
		}
	}
	state.PreviousEpochAttestations = atts
-	state.CurrentCrosslinks[startShard] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'},
-	}
-	state.CurrentCrosslinks[startShard+1] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'},
-	}

	vp, bp := New(context.Background(), state)
	vp, bp, err := ProcessAttestations(context.Background(), state, vp, bp)
@@ -118,7 +91,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
		t.Fatal(err)
	}

-	attestedIndices := []uint64{5, 754, 797, 1637, 1770, 1862, 1192}
+	attestedIndices := []uint64{100, 106, 196, 641, 654, 1606}
	for _, i := range attestedIndices {
		base, err := epoch.BaseReward(state, i)
		if err != nil {
@@ -157,78 +130,6 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
	}
}

-func TestCrosslinkDeltaPrecompute(t *testing.T) {
-	helpers.ClearAllCaches()
-	e := params.BeaconConfig().SlotsPerEpoch
-	helpers.ClearShuffledValidatorCache()
-	validatorCount := uint64(2048)
-	state := buildState(e+2, validatorCount)
-	startShard := uint64(960)
-	atts := make([]*pb.PendingAttestation, 2)
-	for i := 0; i < len(atts); i++ {
-		atts[i] = &pb.PendingAttestation{
-			Data: &ethpb.AttestationData{
-				Crosslink: &ethpb.Crosslink{
-					Shard:    startShard + uint64(i),
-					DataRoot: []byte{'A'},
-				},
-				Target: &ethpb.Checkpoint{},
-				Source: &ethpb.Checkpoint{},
-			},
-			InclusionDelay:  uint64(i + 100),
-			AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x01},
-		}
-	}
-	state.PreviousEpochAttestations = atts
-	state.CurrentCrosslinks[startShard] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'}, Shard: startShard,
-	}
-	state.CurrentCrosslinks[startShard+1] = &ethpb.Crosslink{
-		DataRoot: []byte{'A'}, Shard: startShard + 1,
-	}
-
-	vp, bp := New(context.Background(), state)
-	vp, bp, err := ProcessAttestations(context.Background(), state, vp, bp)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	rewards, penalties, err := crosslinkDeltaPreCompute(state, bp, vp)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	attestedIndices := []uint64{5, 16, 336, 797, 1082, 1450, 1770, 1958}
-	for _, i := range attestedIndices {
-		// Since all these validators attested, they should get the same rewards.
-		want := uint64(12649)
-		if rewards[i] != want {
-			t.Errorf("Wanted reward balance %d, got %d", want, rewards[i])
-		}
-		// Since all these validators attested, they shouldn't get penalized.
-		if penalties[i] != 0 {
-			t.Errorf("Wanted penalty balance 0, got %d", penalties[i])
-		}
-	}
-
-	nonAttestedIndices := []uint64{12, 23, 45, 79}
-	for _, i := range nonAttestedIndices {
-		base, err := epoch.BaseReward(state, i)
-		if err != nil {
-			t.Errorf("Could not get base reward: %v", err)
-		}
-		wanted := base
-		// Since all these validators did not attest, they shouldn't get rewarded.
-		if rewards[i] != 0 {
-			t.Errorf("Wanted reward balance 0, got %d", rewards[i])
-		}
-		// Base penalties for not attesting.
-		if penalties[i] != wanted {
-			t.Errorf("Wanted penalty balance %d, got %d", wanted, penalties[i])
-		}
-	}
-}

func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
	validators := make([]*ethpb.Validator, validatorCount)
	for i := 0; i < len(validators); i++ {
@@ -259,10 +160,7 @@ func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
		Slot:       slot,
		Balances:   validatorBalances,
		Validators: validators,
-		CurrentCrosslinks:      make([]*ethpb.Crosslink, params.BeaconConfig().ShardCount),
		RandaoMixes:            make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
-		ActiveIndexRoots:       make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
-		CompactCommitteesRoots: make([][]byte, params.BeaconConfig().EpochsPerSlashingsVector),
		Slashings:              make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		BlockRoots:             make([][]byte, params.BeaconConfig().SlotsPerEpoch*10),
		FinalizedCheckpoint:    &ethpb.Checkpoint{},
@@ -4,10 +4,9 @@ import (
	"testing"

	"github.com/gogo/protobuf/proto"
+	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
-	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -97,8 +96,6 @@ func TestProcessSlashingsPrecompute_SlashedLess(t *testing.T) {

	for i, tt := range tests {
		t.Run(string(i), func(t *testing.T) {
			helpers.ClearAllCaches()

			ab := uint64(0)
			for i, b := range tt.state.Balances {
				// Skip validator 0 since it's slashed
@@ -1,9 +0,0 @@
package spectest

import (
	"testing"
)

func TestCrosslinksProcessingMainnet(t *testing.T) {
	runCrosslinkProcessingTests(t, "mainnet")
}
@@ -1,9 +0,0 @@
package spectest

import (
	"testing"
)

func TestCrosslinksProcessingMinimal(t *testing.T) {
	runCrosslinkProcessingTests(t, "minimal")
}
@@ -1,33 +0,0 @@
package spectest

import (
	"path"
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params/spectest"
	"github.com/prysmaticlabs/prysm/shared/testutil"
)

func runCrosslinkProcessingTests(t *testing.T, config string) {
	if err := spectest.SetConfig(config); err != nil {
		t.Fatal(err)
	}

	testFolders, testsFolderPath := testutil.TestFolders(t, config, "epoch_processing/crosslinks/pyspec_tests")
	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			folderPath := path.Join(testsFolderPath, folder.Name())
			testutil.RunEpochOperationTest(t, folderPath, processCrosslinksWrapper)
		})
	}
}

func processCrosslinksWrapper(t *testing.T, state *pb.BeaconState) (*pb.BeaconState, error) {
	state, err := epoch.ProcessCrosslinks(state)
	if err != nil {
		t.Fatalf("could not process crosslinks: %v", err)
	}
	return state, nil
}
@@ -5,6 +5,5 @@ import (
)

func TestJustificationAndFinalizationMinimal(t *testing.T) {
-	t.Skip("Fails for could not get target atts current epoch")
	runJustificationAndFinalizationTests(t, "minimal")
}
@@ -5,9 +5,7 @@ import (
	"path"
	"testing"

-	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params/spectest"
	"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -23,40 +21,11 @@ func runJustificationAndFinalizationTests(t *testing.T, config string) {
	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			folderPath := path.Join(testsFolderPath, folder.Name())
-			testutil.RunEpochOperationTest(t, folderPath, processJustificationAndFinalizationWrapper)
+			testutil.RunEpochOperationTest(t, folderPath, processJustificationAndFinalizationPrecomputeWrapper)
		})
	}
}

-// This is a subset of state.ProcessEpoch. The spec test defines input data for
-// `justification_and_finalization` only.
-func processJustificationAndFinalizationWrapper(t *testing.T, state *pb.BeaconState) (*pb.BeaconState, error) {
-	prevEpochAtts, err := epoch.MatchAttestations(state, helpers.PrevEpoch(state))
-	if err != nil {
-		t.Fatalf("could not get target atts prev epoch %d: %v", helpers.PrevEpoch(state), err)
-	}
-	currentEpochAtts, err := epoch.MatchAttestations(state, helpers.CurrentEpoch(state))
-	if err != nil {
-		t.Fatalf("could not get target atts current epoch %d: %v", helpers.CurrentEpoch(state), err)
-	}
-	prevEpochAttestedBalance, err := epoch.AttestingBalance(state, prevEpochAtts.Target)
-	if err != nil {
-		t.Fatalf("could not get attesting balance prev epoch: %v", err)
-	}
-	currentEpochAttestedBalance, err := epoch.AttestingBalance(state, currentEpochAtts.Target)
-	if err != nil {
-		t.Fatalf("could not get attesting balance current epoch: %v", err)
-	}
-
-	state, err = epoch.ProcessJustificationAndFinalization(state, prevEpochAttestedBalance, currentEpochAttestedBalance)
-	if err != nil {
-		t.Fatalf("could not process justification: %v", err)
-	}
-
-	return state, nil
-}

func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, state *pb.BeaconState) (*pb.BeaconState, error) {
	ctx := context.Background()
	vp, bp := precompute.New(ctx, state)
beacon-chain/core/exit/BUILD.bazel (new file, 37 lines)
@@ -0,0 +1,37 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["validation.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/exit",
    visibility = [
        "//beacon-chain:__subpackages__",
    ],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/mathutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/roughtime:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["validation_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
    ],
)
beacon-chain/core/exit/validation.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package exit

import (
	"fmt"
	"time"

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/mathutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/roughtime"
)

// ValidateVoluntaryExit validates the voluntary exit.
// If it is invalid for some reason an error, if valid it will return no error.
func ValidateVoluntaryExit(state *pb.BeaconState, genesisTime time.Time, signed *ethpb.SignedVoluntaryExit) error {
	if signed == nil || signed.Exit == nil {
		return errors.New("nil signed voluntary exit")
	}
	ve := signed.Exit
	if ve.ValidatorIndex >= uint64(len(state.Validators)) {
		return fmt.Errorf("unknown validator index %d", ve.ValidatorIndex)
	}
	validator := state.Validators[ve.ValidatorIndex]

	if !helpers.IsActiveValidator(validator, ve.Epoch) {
		return fmt.Errorf("validator %d not active at epoch %d", ve.ValidatorIndex, ve.Epoch)
	}
	if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
		return fmt.Errorf("validator %d already exiting or exited", ve.ValidatorIndex)
	}

	secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * params.BeaconConfig().SlotsPerEpoch
	currentEpoch := uint64(roughtime.Now().Unix()-genesisTime.Unix()) / secondsPerEpoch
	earliestRequestedExitEpoch := mathutil.Max(ve.Epoch, currentEpoch)
	earliestExitEpoch := validator.ActivationEpoch + params.BeaconConfig().PersistentCommitteePeriod
	if earliestRequestedExitEpoch < earliestExitEpoch {
		return fmt.Errorf("validator %d cannot exit before epoch %d", ve.ValidatorIndex, earliestExitEpoch)
	}

	// Confirm signature is valid
	root, err := ssz.HashTreeRoot(ve)
	if err != nil {
		return errors.Wrap(err, "cannot confirm signature")
	}
	sig, err := bls.SignatureFromBytes(signed.Signature)
	if err != nil {
		return errors.Wrap(err, "malformed signature")
	}
	validatorPubKey, err := bls.PublicKeyFromBytes(validator.PublicKey)
	if err != nil {
		return errors.Wrap(err, "invalid validator public key")
	}
	domain := bls.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit)
	verified := sig.Verify(root[:], validatorPubKey, domain)
	if !verified {
		return errors.New("incorrect signature")
	}

	// Parameters are valid.
	return nil
}
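Editor's note: the exit-eligibility check in ValidateVoluntaryExit above reduces to max(requested epoch, current epoch) >= activation epoch + PersistentCommitteePeriod. The sketch below only restates that arithmetic with concrete numbers and is not code from this change. The PersistentCommitteePeriod value of 2048 is an assumption (it is consistent with the "cannot exit before epoch 2048" expectation in the test file that follows), and the local max helper stands in for shared/mathutil.Max.

package main

import "fmt"

// Restates the exit-eligibility arithmetic from ValidateVoluntaryExit with
// concrete numbers. PersistentCommitteePeriod = 2048 is an assumed mainnet
// value; activation epoch 0 mirrors a genesis validator.
func main() {
	const (
		activationEpoch           = uint64(0)
		persistentCommitteePeriod = uint64(2048)
	)
	requestedEpoch := uint64(2047)
	currentEpoch := uint64(100) // derived from genesis time in the real code

	max := func(a, b uint64) uint64 {
		if a > b {
			return a
		}
		return b
	}

	earliestRequested := max(requestedEpoch, currentEpoch)
	earliestAllowed := activationEpoch + persistentCommitteePeriod
	if earliestRequested < earliestAllowed {
		fmt.Printf("rejected: validator cannot exit before epoch %d\n", earliestAllowed)
	} else {
		fmt.Println("exit request passes the epoch check")
	}
}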
beacon-chain/core/exit/validation_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package exit_test

import (
	"context"
	"errors"
	"testing"
	"time"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	blk "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/exit"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
)

// Set genesis to a small set for faster test processing.
func init() {
	p := params.BeaconConfig()
	p.MinGenesisActiveValidatorCount = 8
	params.OverrideBeaconConfig(p)
}

func TestValidation(t *testing.T) {
	tests := []struct {
		name           string
		epoch          uint64
		validatorIndex uint64
		signature      []byte
		err            error
	}{
		{
			name:           "MissingValidator",
			epoch:          2048,
			validatorIndex: 16,
			err:            errors.New("unknown validator index 16"),
		},
		{
			name:           "EarlyExit",
			epoch:          2047,
			validatorIndex: 0,
			err:            errors.New("validator 0 cannot exit before epoch 2048"),
		},
		{
			name:           "NoSignature",
			epoch:          2048,
			validatorIndex: 0,
			err:            errors.New("malformed signature: signature must be 96 bytes"),
		},
		{
			name:           "InvalidSignature",
			epoch:          2048,
			validatorIndex: 0,
			signature:      []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
			err:            errors.New("malformed signature: could not unmarshal bytes into signature: err blsSignatureDeserialize 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		},
		{
			name:           "IncorrectSignature",
			epoch:          2048,
			validatorIndex: 0,
			signature:      []byte{0xab, 0xb0, 0x12, 0x4c, 0x75, 0x74, 0xf2, 0x81, 0xa2, 0x93, 0xf4, 0x18, 0x5c, 0xad, 0x3c, 0xb2, 0x26, 0x81, 0xd5, 0x20, 0x91, 0x7c, 0xe4, 0x66, 0x65, 0x24, 0x3e, 0xac, 0xb0, 0x51, 0x00, 0x0d, 0x8b, 0xac, 0xf7, 0x5e, 0x14, 0x51, 0x87, 0x0c, 0xa6, 0xb3, 0xb9, 0xe6, 0xc9, 0xd4, 0x1a, 0x7b, 0x02, 0xea, 0xd2, 0x68, 0x5a, 0x84, 0x18, 0x8a, 0x4f, 0xaf, 0xd3, 0x82, 0x5d, 0xaf, 0x6a, 0x98, 0x96, 0x25, 0xd7, 0x19, 0xcc, 0xd2, 0xd8, 0x3a, 0x40, 0x10, 0x1f, 0x4a, 0x45, 0x3f, 0xca, 0x62, 0x87, 0x8c, 0x89, 0x0e, 0xca, 0x62, 0x23, 0x63, 0xf9, 0xdd, 0xb8, 0xf3, 0x67, 0xa9, 0x1e, 0x84},
			err:            errors.New("incorrect signature"),
		},
		{
			name:           "Good",
			epoch:          2048,
			validatorIndex: 0,
			signature:      []byte{0xb3, 0xe1, 0x9d, 0xc6, 0x7c, 0x78, 0x6c, 0xcf, 0x33, 0x1d, 0xb9, 0x6f, 0x59, 0x64, 0x44, 0xe1, 0x29, 0xd0, 0x87, 0x03, 0x26, 0x6e, 0x49, 0x1c, 0x05, 0xae, 0x16, 0x7b, 0x04, 0x0f, 0x3f, 0xf8, 0x82, 0x77, 0x60, 0xfc, 0xcf, 0x2f, 0x59, 0xc7, 0x40, 0x0b, 0x2c, 0xa9, 0x23, 0x8a, 0x6c, 0x8d, 0x01, 0x21, 0x5e, 0xa8, 0xac, 0x36, 0x70, 0x31, 0xb0, 0xe1, 0xa8, 0xb8, 0x8f, 0x93, 0x8c, 0x1c, 0xa2, 0x86, 0xe7, 0x22, 0x00, 0x6a, 0x7d, 0x36, 0xc0, 0x2b, 0x86, 0x2c, 0xf5, 0xf9, 0x10, 0xb9, 0xf2, 0xbd, 0x5e, 0xa6, 0x5f, 0x12, 0x86, 0x43, 0x20, 0x4d, 0xa2, 0x9d, 0x8b, 0xe6, 0x6f, 0x09},
		},
	}

	db := dbutil.SetupDB(t)
	defer dbutil.TeardownDB(t, db)
	ctx := context.Background()
	deposits, _, _ := testutil.DeterministicDepositsAndKeys(params.BeaconConfig().MinGenesisActiveValidatorCount)
	beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}
	block := blk.NewGenesisBlock([]byte{})
	if err := db.SaveBlock(ctx, block); err != nil {
		t.Fatalf("Could not save genesis block: %v", err)
	}
	genesisRoot, err := ssz.HashTreeRoot(block.Block)
	if err != nil {
		t.Fatalf("Could not get signing root %v", err)
	}

	// Set genesis time to be 100 epochs ago
	genesisTime := time.Now().Add(time.Duration(-100*int64(params.BeaconConfig().SecondsPerSlot*params.BeaconConfig().SlotsPerEpoch)) * time.Second)
	mockChainService := &mockChain.ChainService{State: beaconState, Root: genesisRoot[:], Genesis: genesisTime}
	headState, err := mockChainService.HeadState(context.Background())
	if err != nil {
		t.Fatal("Failed to obtain head state")
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			req := &ethpb.SignedVoluntaryExit{
				Exit: &ethpb.VoluntaryExit{
					Epoch:          test.epoch,
					ValidatorIndex: test.validatorIndex,
				},
				Signature: test.signature,
			}

			err := exit.ValidateVoluntaryExit(headState, genesisTime, req)
			if test.err == nil {
				if err != nil {
					t.Errorf("Unexpected error: received %v", err)
				}
			} else {
				if err == nil {
					t.Error("Failed to receive expected error")
				}
				if err.Error() != test.err.Error() {
					t.Errorf("Unexpected error: expected %s, received %s", test.err.Error(), err.Error())
				}
			}
		})
	}
}
beacon-chain/core/feed/BUILD.bazel (new file, 8 lines)
@@ -0,0 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["event.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/feed",
    visibility = ["//beacon-chain:__subpackages__"],
)
beacon-chain/core/feed/event.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package feed

// How to add a new event to the feed:
// 1. Add a file for the new type of feed.
// 2. Add a constant describing the list of events.
// 3. Add a structure with the name `<event>Data` containing any data fields that should be supplied with the event.
//
// Note that the same event is supplied to all subscribers, so the event received by subscribers should be considered read-only.

// EventType is the type that defines the type of event.
type EventType int

// Event is the event that is sent with operation feed updates.
type Event struct {
	// Type is the type of event.
	Type EventType
	// Data is event-specific data.
	Data interface{}
}
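Editor's note: the comment block at the top of event.go lays out a three-step recipe for adding a new event type to the feed, and the operation package added below is the in-tree application of that recipe. The snippet here is a purely hypothetical second application of it; the package name, constant, struct, and the ethpb.SignedBeaconBlock field type are assumptions made for illustration only and do not exist in this diff.

// Hypothetical example of the recipe above; nothing in this snippet exists in this diff.
package block

import ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"

const (
	// ReceivedBlock is sent after a beacon block has been received from the outside world.
	ReceivedBlock = iota + 1
)

// ReceivedBlockData is the data sent with ReceivedBlock events.
type ReceivedBlockData struct {
	// SignedBlock is the block that was received.
	SignedBlock *ethpb.SignedBeaconBlock
}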
beacon-chain/core/feed/operation/BUILD.bazel (new file, 15 lines)
@@ -0,0 +1,15 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "events.go",
        "notifier.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//shared/event:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
    ],
)
beacon-chain/core/feed/operation/events.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package operation

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)

const (
	// UnaggregatedAttReceived is sent after an unaggregated attestation object has been received
	// from the outside world. (eg. in RPC or sync)
	UnaggregatedAttReceived = iota + 1

	// AggregatedAttReceived is sent after an aggregated attestation object has been received
	// from the outside world. (eg. in sync)
	AggregatedAttReceived

	// ExitReceived is sent after an voluntary exit object has been received from the outside world (eg in RPC or sync)
	ExitReceived
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
type UnAggregatedAttReceivedData struct {
	// Attestation is the unaggregated attestation object.
	Attestation *ethpb.Attestation
}

// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
type AggregatedAttReceivedData struct {
	// Attestation is the aggregated attestation object.
	Attestation *ethpb.AggregateAttestationAndProof
}

// ExitReceivedData is the data sent with ExitReceived events.
type ExitReceivedData struct {
	// Exit is the voluntary exit object.
	Exit *ethpb.SignedVoluntaryExit
}
beacon-chain/core/feed/operation/notifier.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package operation

import "github.com/prysmaticlabs/prysm/shared/event"

// Notifier interface defines the methods of the service that provides beacon block operation updates to consumers.
type Notifier interface {
	OperationFeed() *event.Feed
}
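Editor's note: Notifier only exposes the feed; this diff does not show how consumers use it. The sketch below is a hedged guess at a consumer loop. It assumes that shared/event.Feed offers a go-ethereum-style Subscribe returning a Subscription with Unsubscribe, and that producers publish *feed.Event values carrying the operation event types defined in events.go; none of that wiring appears in this change.

package consumer

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
)

// consumeOperations drains operation events from the notifier's feed and
// reacts to voluntary exits. Assumes producers send *feed.Event values.
func consumeOperations(notifier operation.Notifier) {
	c := make(chan *feed.Event, 1)
	sub := notifier.OperationFeed().Subscribe(c)
	defer sub.Unsubscribe()

	for ev := range c {
		switch ev.Type {
		case operation.ExitReceived:
			if data, ok := ev.Data.(*operation.ExitReceivedData); ok {
				fmt.Printf("voluntary exit received for validator %d\n", data.Exit.Exit.ValidatorIndex)
			}
		case operation.UnaggregatedAttReceived, operation.AggregatedAttReceived:
			// Attestation handling would go here.
		}
	}
}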
Some files were not shown because too many files have changed in this diff.