Compare commits


83 Commits

Author SHA1 Message Date
Raul Jordan
8b8a38039b radek feedback 2022-10-25 11:49:48 -04:00
Raul Jordan
5410c30890 Merge branch 'blob-rotating-buffer' of github.com:rauljordan/prysm into blob-rotating-buffer 2022-10-25 11:45:56 -04:00
Raul Jordan
8024758ff4 Update beacon-chain/db/kv/blobs.go
Co-authored-by: Radosław Kapka <radek@prysmaticlabs.com>
2022-10-25 11:42:14 -04:00
Raul Jordan
826a930ed0 Update beacon-chain/db/kv/blobs.go
Co-authored-by: Radosław Kapka <radek@prysmaticlabs.com>
2022-10-25 11:42:09 -04:00
Raul Jordan
3d45b83059 Update beacon-chain/db/kv/blobs.go
Co-authored-by: Radosław Kapka <radek@prysmaticlabs.com>
2022-10-25 11:39:58 -04:00
Raul Jordan
9096c4a8c4 dedup comment 2022-10-24 21:48:25 -04:00
Raul Jordan
31f369c982 comment 2022-10-24 21:42:51 -04:00
Raul Jordan
8a7ab754b2 algorithm and test done 2022-10-24 21:33:19 -04:00
Raul Jordan
752bb63ec4 save blobs test 2022-10-24 21:15:10 -04:00
Raul Jordan
52a87e912d unit test 2022-10-24 20:59:14 -04:00
Raul Jordan
3400af655c rotating buffer db schema for blobs 2022-10-24 20:45:54 -04:00
terence tsao
1e5c0778b9 Add compute_aggregated_poly_and_commitment 2022-09-27 13:09:42 -07:00
terence tsao
a032cf2ccc Sync with upstream 2022-09-23 13:46:04 -07:00
terence tsao
f871d7668f Merge branch 'develop' of github.com:terencechain/prysm into eip4844 2022-09-23 13:19:21 -07:00
terence tsao
5c2ebec369 Can build 2022-08-20 13:11:44 -07:00
terence tsao
a22d62e398 Sync with upstream 2022-08-20 12:43:41 -07:00
terence tsao
00c57e76ec Sync with upstream 2022-08-09 20:28:04 -07:00
terence tsao
1d800e42c5 Merge branch 'develop' of https://github.com/prysmaticlabs/prysm into eip4844 2022-08-09 20:20:40 -07:00
terence tsao
1391ccedd2 Refactor forkchoice valid data status 2022-08-05 17:55:50 -07:00
terence tsao
e71124ee87 Clean up db and generated pbs 2022-08-04 16:51:15 -07:00
terence tsao
f04b2be9de Sync with native block changes 2022-08-04 11:13:35 -07:00
terence tsao
080810ec25 Merge branch 'develop' of https://github.com/prysmaticlabs/prysm into eip4844 2022-08-04 10:35:14 -07:00
terence tsao
37118e7815 Clean up core 2022-08-02 16:56:17 -07:00
terence tsao
a390c74721 Encapsulate sidecar within signed block interface 2022-08-01 15:49:00 -07:00
terence tsao
9b260be2c7 Sync with upstream 2022-07-31 17:45:38 -07:00
terence tsao
bd6cf8d5d8 Sync with @inphi's clean-slate and upstream 2022-07-31 17:37:02 -07:00
terence tsao
bff4435b72 Merge branch 'inphi/clean-slate' of https://github.com/inphi/prysm into eip4844 2022-07-31 16:39:54 -07:00
terence tsao
d8187b7bb1 Merge branch 'develop' of https://github.com/prysmaticlabs/prysm into eip4844 2022-07-30 18:41:12 -07:00
inphi
109a0f5f0f update EIP4844 BeaconChainConfig 2022-07-28 15:15:29 -04:00
inphi
bc0ba2d508 Rate-limit sidecar requests during initial-sync 2022-07-28 02:59:20 -04:00
inphi
5b53ad6cdf Add EIP-4844 bootstrap node for devnet 2022-07-27 01:18:33 -04:00
inphi
920e0757d9 Update bazel deps; errcheck linting 2022-07-22 12:00:29 -04:00
Michael de Hoog
bd01ffaa97 Add aggregated proof to APIs (#10) 2022-07-21 15:18:38 -04:00
inphi
41f34aa6dd tweak eip4844 testnet configs 2022-07-13 11:56:16 -04:00
inphi
48c26b9eec configure blobs prune period in testnet 2022-07-13 01:53:09 -04:00
inphi
eed1e47664 Add --eip4844 testnet flag for ez config 2022-07-13 01:47:41 -04:00
inphi
b18ebab455 Incorporate data availability to fork-choice 2022-07-08 17:08:27 -04:00
inphi
ebe1e9ec3a nit 2022-07-05 18:38:03 -04:00
inphi
b2b4aa9406 Prune blobs older than MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS 2022-07-05 16:39:02 -04:00
inphi
7d6f079697 testing: validateBeaconBlock w/ kzgs 2022-06-30 19:47:39 -04:00
Murphy Law
b2a9a79e96 Sync: Blobs Sidecar (#8)
* Add MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS config
* Implement inital-sync for blob sidecars
* Assert data availability before transitioning blocks from INVALID to VALID
* Harden sidecar validation in pubsub subscriber
* Introduce sidecar pending queue to defer processing for out-of-order block/sidecars
2022-06-30 14:58:06 -04:00
inphi
cb524c5cef Merge remote-tracking branch 'origin/develop' into inphi/clean-slate-merge 2022-06-23 23:53:34 -04:00
Murphy Law
2e0a6d6556 p2p: Handle sidecar message broadcasts (#7)
NOTE: Syncing blobs from genesis (initial-sync) doesn't work yet. This change only allows beacon nodes to synchronized broadcast blob sidecars.
2022-06-16 13:31:26 -04:00
Michael de Hoog
ac4a4cc80d Fix BlobsBundle JSON (un)marshaling (#6)
* Register RPC handlers on EIP-4844 fork

* Fix JSON marshaling of blob bundles
2022-06-16 11:07:50 -04:00
inphi
027e49c120 add IsPreEIP4844Version utility func 2022-06-16 02:05:24 -04:00
inphi
ff822a6f4e Avoid EIP-4844 validation for older block versions 2022-06-16 01:42:40 -04:00
inphi
901a087a6d fix SaveBlobs span typo 2022-06-15 18:37:57 -04:00
inphi
be8c0293ad validate BeaconBlock for EIP-4844 2022-06-15 17:44:34 -04:00
inphi
d5d1b33761 Add rpc_blobs_sidecars_by_range_test to bazel deps 2022-06-14 17:15:57 -04:00
Michael de Hoog
53bb657bd2 EIP-4844 beacon RPCs (#5)
* Add EIP-4844 messages and generate code

* Add v1 to v2 conversion

* Add middleware

* Serialize the blob KZGs rather than the bundles

* Add SignedBlindedBeaconBlockEip4844 to SSZ code generation

* Fix version

* Add debugging conversion

* Add v1alpha blinded implementation

* Switch index of blob sidecar

* Remove BlobsBundle from SSZ for now

* Fix permissions

* Remove EIP-4844 specific beacon state

* Some more missing types

* Remove dangling method

* Use sequential proto ids

* Copy the KZGs array
2022-06-14 16:54:48 -04:00
inphi
e2bf25e088 fix BlobsSidecarsByRange RPC Request routine 2022-06-13 23:40:29 -04:00
inphi
31e1999ddf add generated ssz files for BlobsSidecarsByRangeRequest 2022-06-13 21:08:24 -04:00
inphi
cb3da77ff4 testing: Assert received fields for BlobsSidecarsByRange RPC 2022-06-13 20:44:40 -04:00
inphi
40c754e991 p2p: Implement BlobsSidecarsByRange RPC 2022-06-13 20:33:02 -04:00
inphi
b6a05ecd32 add missing bazel dependencies 2022-06-13 17:55:11 -04:00
inphi
3a6a7d7bcd persist proposed blobs sidecar 2022-06-13 17:46:53 -04:00
inphi
27677da136 assert len(blobkzgs) == len(blob_versioned_hashes) 2022-06-13 16:17:24 -04:00
Michael de Hoog
eb1c7bd5f6 Replace [32]byte with common.Hash (#4) 2022-06-13 16:00:53 -04:00
Michael de Hoog
b3d2ed1aaf TxPeekBlobVersionedHashes: fix hash copying (#3) 2022-06-13 15:50:59 -04:00
Michael de Hoog
bf1b5a5094 [EIP-4844] Fix BlobVersionedHashes SSZ parsing (#1) 2022-06-13 11:38:08 -04:00
inphi
d3b6b2d8bc Fix blobsBundle JSON parsing 2022-06-10 10:05:37 -04:00
inphi
43814449c0 Update geth dependency
Using mdehoog's branch from now on as it contains the latest changes
2022-06-09 18:12:48 -04:00
inphi
533a6b3f0c fix EIP4844 BeaconBlock signature 2022-06-09 17:57:38 -04:00
inphi
8e370da195 Add blob kzgs to the proposed BeaconBlock 2022-06-09 15:12:14 -04:00
inphi
024ad7b433 Fix bazel build
- Update deps.bzl
- Fix generated go pb files due to buggy goimports
2022-06-09 13:10:39 -04:00
inphi
13becadb30 Blobs verification and signing for block proposals
Note: local INTEROP testing works pre-eip484
2022-06-09 12:23:26 -04:00
inphi
e3f1a2d321 Add eip4844 fork configs
- Also bumped the sharding configs so its value can be reused for eip4844
- Applied 01379e0ab5cd216ea7d0afd89b2ff5f64e04d9cb
2022-06-09 10:35:47 -04:00
inphi
0f8c4431fc wip: Getting closer to full blobs impl tip
- Add side as a new field to the beacon-chain/validator gRPC API
- Retrieve blobs from EL via engine API and attach to the BeaconBlock.

Note; This implementation passes manual INTEROP tests for pre-eip4844
block production
2022-06-09 09:51:34 -04:00
inphi
f566d50a43 Merge commit '80546f83e9e15e54668e69308f9c5898db667cf1' into inphi/clean-slate 2022-06-07 01:45:14 -04:00
terence tsao
7fc3a697d8 Add verify kzgs against version hashes 2022-05-24 20:58:31 -07:00
terence tsao
c5ee1c2735 Merge branch 'develop' of https://github.com/prysmaticlabs/prysm into eip4844 2022-05-24 20:32:10 -07:00
terence tsao
658725a601 Port eip4844 changes from Prysm 2022-05-22 09:20:20 -07:00
terence tsao
80546f83e9 Initial EIP4844 support 2022-03-06 20:26:23 -08:00
prylabs-bulldozer[bot]
d7489f9f0f Merge refs/heads/develop into get-payload 2022-03-06 08:09:17 +00:00
terence tsao
aad947b83f Merge branch 'get-payload' of github.com:prysmaticlabs/prysm into get-payload 2022-03-04 15:06:29 -08:00
terence tsao
7d2a257094 Update proposer_execution_payload_test.go 2022-03-04 15:06:18 -08:00
prylabs-bulldozer[bot]
965280190a Merge refs/heads/develop into get-payload 2022-03-04 22:25:09 +00:00
terence tsao
bcffe3d291 Update proposer_execution_payload.go 2022-03-04 12:20:32 -08:00
terence tsao
e3438cd9ee Update client.go 2022-03-04 12:15:19 -08:00
terence tsao
a8bc8af9e7 Merge branch 'get-payload' of github.com:prysmaticlabs/prysm into get-payload 2022-03-04 11:14:42 -08:00
terence tsao
e70492ec8c Add prepare execution payload 2022-03-04 11:14:24 -08:00
terence tsao
ea9979f813 Add prepare execution payload 2022-03-04 11:13:05 -08:00
terence tsao
e504d31285 Add prepare execution payload 2022-03-04 11:11:02 -08:00
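The blob storage commits above (3400af655c "rotating buffer db schema for blobs", b2b4aa9406 "Prune blobs older than MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS") revolve around keeping only a bounded window of sidecars on disk. A minimal sketch of one way such a rotating buffer can work, assuming a key-value store keyed by slot modulo the retention window; the package and identifiers below are illustrative and not Prysm's actual schema:

// Illustrative only: blobs are written under a key derived from
// slot % retentionSlots, so a new write that lands on an already-used
// rotation index overwrites the expired entry instead of requiring a
// separate pruning pass. A reader must still compare the stored slot
// against the requested slot to reject stale entries.
package blobstore

import "encoding/binary"

const slotsPerEpoch = 32

// retentionSlots is the window implied by MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS.
func retentionSlots(minEpochsForBlobsSidecarsRequests uint64) uint64 {
	return minEpochsForBlobsSidecarsRequests * slotsPerEpoch
}

// rotatingKey maps a slot into a fixed-size key space of retentionSlots entries.
func rotatingKey(slot, minEpochs uint64) []byte {
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, slot%retentionSlots(minEpochs))
	return key
}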
1000 changed files with 29602 additions and 39717 deletions


@@ -21,6 +21,7 @@ build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --stamp
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
@@ -37,6 +38,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --config=llvm
# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
@@ -230,9 +232,3 @@ build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel
# Build the binary with Beacon API calls for the validator
build --flag_alias=use_beacon_api=//validator/client/validator-client-factory:use_beacon_api
build:beacon_api --use_beacon_api
build:beacon_api --define=gotags=use_beacon_api
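The flag_alias and build:beacon_api lines above tie a named bazel config to a use_beacon_api Go tag, in the same pattern as the bazel gotag a few lines earlier. In general such a tag gates which files get compiled; a hedged illustration of that mechanism follows, with a hypothetical file and package name rather than the repository's actual factory code:

//go:build use_beacon_api

// validator_client_beacon_api.go (hypothetical file): compiled only when the
// use_beacon_api build tag is set, for example via the beacon_api config above.
// A sibling file guarded by !use_beacon_api would provide the default client.
package validatorclientfactory

func usesBeaconAPI() bool { return true }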

.github/CODEOWNERS vendored (10 changed lines)

@@ -5,4 +5,12 @@
*.bzl @prestonvanloon
# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team
deps.bzl @prysmaticlabs/core-team
# Radek and Nishant are responsible for changes that can affect the native state feature.
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan


@@ -26,10 +26,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.18
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: 1.18
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.18
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: 1.18
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.50.1
version: v1.47.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: 1.18
id: go
- name: Check out code into the Go module directory

.gitignore vendored (3 changed lines)

@@ -38,6 +38,3 @@ metaData
# execution API authentication
jwt.hex
# manual testing
tmp


@@ -6,7 +6,7 @@ run:
- proto
- tools/analyzers
timeout: 10m
go: '1.19'
go: '1.18'
linters:
disable-all: true


@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.2.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.2.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.2.0)
[![Consensus_Spec_Version 1.2.0-rc.3](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.2.0.rc.3-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
[![Execution_API_Version 1.0.0-beta.1](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.1-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)


@@ -88,10 +88,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "ae013bf35bd23234d1dea46b079f1e05ba74ac0321423830119d3e787ec73483",
sha256 = "16e9fca53ed6bd4ff4ad76facc9b7b651a89db1689a2877d6fd7b82aa824e366",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.34.0/rules_go-v0.34.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.34.0/rules_go-v0.34.0.zip",
],
)
@@ -110,6 +110,13 @@ git_repository(
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
http_archive(
name = "fuzzit_linux",
build_file_content = "exports_files([\"fuzzit\"])",
sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -157,15 +164,35 @@ container_pull(
repository = "pinglamb/alpine-glibc",
)
container_pull(
name = "fuzzit_base",
digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
registry = "gcr.io",
repository = "fuzzit-public/stretch-llvm8",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
go_register_toolchains(
go_version = "1.19.4",
go_version = "1.18.5",
nogo = "@//:nogo",
)
http_archive(
name = "prysm_testnet_site",
build_file_content = """
proto_library(
name = "faucet_proto",
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -188,7 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.3.0-alpha.1"
consensus_spec_version = "v1.2.0-rc.3"
bls_test_version = "v0.1.1"
@@ -204,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b5a65eb5ecef1c4fca82ff29739936fee019e8a529ef392ea5e46aa39f40a0b2",
sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -220,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b381bb0184e69cb17d05fbbe75f48c6aec7726957d073e3a65c26671d5d27d37",
sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -236,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9466f2a5a2dea039a2deb953f0b5dce5399400028bf3f218ffef03f8ef9c446c",
sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -251,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "3cc3141651a320a1f5767d15826e85aaa96eb4459d9e1a1d3f5a0cdbc79b8f56",
sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -315,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b2226874526805d64c29e5053fa28e511b57c0860585d6d59777ee81ff4859ca",
sha256 = "e0c0b5dc609b3a221e74c720f483c595441f2ad5e38bb8aa3522636039945a6f",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.2/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.1/prysm-web-ui.tar.gz",
],
)


@@ -1,5 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -10,7 +10,6 @@ import (
"net/http"
"net/url"
"text/template"
"time"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -32,7 +31,6 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -254,11 +252,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb *ethpb.SignedBlinded
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}


@@ -81,7 +81,7 @@ func TestClient_Status(t *testing.T) {
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -111,7 +111,6 @@ func TestClient_RegisterValidator(t *testing.T) {
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}


@@ -23,8 +23,8 @@ type ValidatorRegistration struct {
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Message: &ValidatorRegistration{r.Message},
Signature: r.SignedValidatorRegistrationV1.Signature,
@@ -36,8 +36,8 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -49,10 +49,10 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{
FeeRecipient: r.FeeRecipient,
GasLimit: fmt.Sprintf("%d", r.GasLimit),
@@ -66,10 +66,10 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -183,11 +183,11 @@ func (s Uint64String) MarshalText() ([]byte, error) {
}
type ExecHeaderResponse struct {
Version string `json:"version"`
Version string `json:"version,omitempty"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBid `json:"message"`
} `json:"data"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *BuilderBid `json:"message,omitempty"`
} `json:"data,omitempty"`
}
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
@@ -233,26 +233,26 @@ func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
}
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
Header *ExecutionPayloadHeader `json:"header,omitempty"`
Value Uint256 `json:"value,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
*v1.ExecutionPayloadHeader
}
@@ -294,25 +294,25 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
}
type ExecPayloadResponse struct {
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
Version string `json:"version,omitempty"`
Data ExecutionPayload `json:"data,omitempty"`
}
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
Transactions []hexutil.Bytes `json:"transactions,omitempty"`
}
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
@@ -356,8 +356,8 @@ type BlindedBeaconBlockBodyBellatrix struct {
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
@@ -367,10 +367,10 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
@@ -386,8 +386,8 @@ type ProposerSlashing struct {
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
}{
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
@@ -400,8 +400,8 @@ type SignedBeaconBlockHeader struct {
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Header *BeaconBlockHeader `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
Signature: h.SignedBeaconBlockHeader.Signature,
@@ -414,11 +414,11 @@ type BeaconBlockHeader struct {
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
BodyRoot hexutil.Bytes `json:"body_root"`
Slot string `json:"slot,omitempty"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
}{
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
@@ -438,9 +438,9 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
}
return json.Marshal(struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature"`
AttestingIndices []string `json:"attesting_indices,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
AttestingIndices: indices,
Data: &AttestationData{a.IndexedAttestation.Data},
@@ -454,8 +454,8 @@ type AttesterSlashing struct {
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1"`
Attestation2 *IndexedAttestation `json:"attestation_2"`
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
}{
Attestation1: &IndexedAttestation{s.Attestation_1},
Attestation2: &IndexedAttestation{s.Attestation_2},
@@ -468,8 +468,8 @@ type Checkpoint struct {
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
Root hexutil.Bytes `json:"root"`
Epoch string `json:"epoch,omitempty"`
Root hexutil.Bytes `json:"root,omitempty"`
}{
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
Root: c.Checkpoint.Root,
@@ -482,11 +482,11 @@ type AttestationData struct {
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
Index string `json:"index"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
Source *Checkpoint `json:"source"`
Target *Checkpoint `json:"target"`
Slot string `json:"slot,omitempty"`
Index string `json:"index,omitempty"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
Source *Checkpoint `json:"source,omitempty"`
Target *Checkpoint `json:"target,omitempty"`
}{
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
@@ -502,9 +502,9 @@ type Attestation struct {
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
}{
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
Data: &AttestationData{a.Attestation.Data},
@@ -518,10 +518,10 @@ type DepositData struct {
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature hexutil.Bytes `json:"signature"`
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
Amount string `json:"amount,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
PublicKey: d.PublicKey,
WithdrawalCredentials: d.WithdrawalCredentials,
@@ -554,8 +554,8 @@ type SignedVoluntaryExit struct {
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message"`
Signature hexutil.Bytes `json:"signature"`
Message *VoluntaryExit `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{
Signature: sve.SignedVoluntaryExit.Signature,
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
@@ -568,8 +568,8 @@ type VoluntaryExit struct {
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
ValidatorIndex string `json:"validator_index"`
Epoch string `json:"epoch,omitempty"`
ValidatorIndex string `json:"validator_index,omitempty"`
}{
Epoch: fmt.Sprintf("%d", ve.Epoch),
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
@@ -582,8 +582,8 @@ type SyncAggregate struct {
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
}{
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
@@ -596,9 +596,9 @@ type Eth1Data struct {
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root"`
DepositCount string `json:"deposit_count"`
BlockHash hexutil.Bytes `json:"block_hash"`
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
DepositCount string `json:"deposit_count,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
}{
DepositRoot: e.DepositRoot,
DepositCount: fmt.Sprintf("%d", e.DepositCount),
@@ -628,16 +628,16 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
Attestations []*Attestation `json:"attestations,omitempty"`
Deposits []*Deposit `json:"deposits,omitempty"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
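The hunks above toggle ,omitempty on the builder API JSON tags. Whichever direction the change runs on this branch, the observable effect of omitempty in encoding/json is that zero values are dropped from the output entirely; without the tag the same fields would still be emitted (null for a nil byte slice, "" for an empty string). A small self-contained sketch, using plain []byte and string stand-ins for the hexutil types:

package main

import (
	"encoding/json"
	"fmt"
)

// header is a stand-in for one of the builder API structs above; for
// emptiness purposes hexutil.Bytes behaves like the []byte used here.
type header struct {
	ExtraData []byte `json:"extra_data,omitempty"`
	GasLimit  string `json:"gas_limit,omitempty"`
}

func main() {
	empty, _ := json.Marshal(header{})
	fmt.Println(string(empty)) // {}  - both zero-valued fields are omitted
	set, _ := json.Marshal(header{GasLimit: "30000000"})
	fmt.Println(string(set)) // {"gas_limit":"30000000"}
}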


@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.


@@ -3,9 +3,7 @@ Copyright 2017 Albert Tedja
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.


@@ -51,9 +51,9 @@ go_library(
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -62,6 +62,7 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blobs:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",


@@ -5,10 +5,10 @@ import (
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -314,7 +314,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return true, err
}
// If fockchoice does not have the headroot, then the node is considered
@@ -338,7 +338,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return false, err
}
// if the requested root is the headroot and the root is not found in


@@ -5,7 +5,6 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -33,7 +32,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -55,7 +54,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wsb},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -75,7 +74,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)


@@ -7,6 +7,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
@@ -84,7 +85,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -109,7 +110,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -136,7 +137,7 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -155,7 +156,7 @@ func TestHeadRoot_UseDB(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -198,7 +199,7 @@ func TestHeadState_CanRetrieve(t *testing.T) {
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
@@ -304,6 +305,31 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
//
// A <- B <- C
@@ -311,7 +337,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
// \ ---------- E
// ---------- D
func TestService_ChainHeads(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -403,7 +429,29 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic(t *testing.T) {
func TestService_IsOptimistic_ProtoArray(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
@@ -433,7 +481,24 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -450,7 +515,66 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}


@@ -14,7 +14,7 @@ var (
// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
errNilJustifiedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used


@@ -70,6 +70,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
if err != nil {
log.WithError(err).Error("Could not get head payload attribute")
return nil, nil
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
@@ -179,10 +180,10 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
// notifyNewPayload signals execution engine on a new payload.
// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) (bool, error) {
postStateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()


@@ -13,9 +13,9 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -31,69 +31,6 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
pid := &v1.PayloadIDBytes{1}
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{PayloadIDBytes: pid}
st, _ = util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
// Intentionally generate a bad state such that `hash_tree_root` fails during `process_slot`
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
arg := &notifyForkchoiceUpdateArg{
headState: s,
headRoot: [32]byte{},
headBlock: b,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(1, 0, [8]byte{}, [32]byte{})
got, err := service.notifyForkchoiceUpdate(ctx, arg)
require.NoError(t, err)
require.DeepEqual(t, got, pid) // We still get a payload ID even though the state is bad. This means it returns until the end.
}
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -106,17 +43,16 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -262,6 +198,151 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//
// A <- B <- C <- D
//      \
//        ---------- E <- F
//                    \
//                      ------ G
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to F then G and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive_Protoarray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba := util.SaveBlock(t, ctx, beaconDB, ba)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb := util.SaveBlock(t, ctx, beaconDB, bb)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc := util.SaveBlock(t, ctx, beaconDB, bc)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd := util.SaveBlock(t, ctx, beaconDB, bd)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe := util.SaveBlock(t, ctx, beaconDB, be)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf := util.SaveBlock(t, ctx, beaconDB, bf)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg := util.SaveBlock(t, ctx, beaconDB, bg)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := protoarray.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
// Insert attestations for D, F and G so that F and G have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
require.NoError(t, err)
require.Equal(t, brg, headRoot)
// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
state: st,
block: wba,
}
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, brf, InvalidBlockRoot(err))
// Ensure Head is D
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
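The scenario above boils down to a pruning walk: when the engine reports the new head invalid with E as the last valid hash, forkchoice must drop G and F together with their weights and fall back to D. Below is a minimal, self-contained sketch of that walk using made-up toy types (toyNode, invalidateToLVH); it is illustrative only and is not Prysm's forkchoice API.

package main

import "fmt"

// toyNode is a hypothetical forkchoice node used only for illustration.
type toyNode struct {
	parent  string
	payload string // execution payload hash
	weight  uint64
}

// invalidateToLVH deletes the invalid head and its ancestors until it reaches
// the node whose payload matches the last valid hash reported by the engine.
func invalidateToLVH(nodes map[string]toyNode, invalidHead, lastValidHash string) {
	cur := invalidHead
	for {
		n, ok := nodes[cur]
		if !ok || n.payload == lastValidHash {
			return // reached the last valid ancestor (or ran out of nodes)
		}
		delete(nodes, cur) // removing the node also discards its weight
		cur = n.parent
	}
}

func main() {
	// Mirrors the shape of the test: F and G are children of E, and E is valid.
	nodes := map[string]toyNode{
		"E": {parent: "B", payload: "0xE"},
		"F": {parent: "E", payload: "0xF", weight: 100},
		"G": {parent: "E", payload: "0xG", weight: 200},
	}
	invalidateToLVH(nodes, "G", "0xE")
	invalidateToLVH(nodes, "F", "0xE")
	_, hasE := nodes["E"]
	_, hasF := nodes["F"]
	_, hasG := nodes["G"]
	fmt.Println(hasE, hasF, hasG) // true false false: E survives, F and G are gone
}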
func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -496,7 +577,7 @@ func Test_NotifyNewPayload(t *testing.T) {
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
@@ -742,7 +823,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -785,7 +866,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithStateGen(stategen.New(beaconDB)),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
@@ -827,8 +908,8 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
stateGen := stategen.New(beaconDB)
fcs := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fcs)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
@@ -941,11 +1022,10 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -994,11 +1074,10 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
func TestService_getPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -20,7 +19,7 @@ func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}
@@ -38,7 +37,7 @@ func TestService_HeadDomainFetcher_Errors(t *testing.T) {
beaconDB := dbtest.SetupDB(t)
c := &Service{
cfg: &config{
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
StateGen: stategen.New(beaconDB),
},
}
c.head = &head{}

View File

@@ -10,6 +10,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -89,7 +90,7 @@ func TestSaveHead_Different(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
@@ -147,7 +148,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
@@ -233,6 +234,64 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
}
func TestSaveOrphanedAtts_NoCommonAncestor_Protoarray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
// this test does not make sense in doubly linked tree since it enforces
// that the finalized node is a common ancestor
service.cfg.ForkChoiceStore = protoarray.New()
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -479,7 +538,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}

View File

@@ -38,14 +38,14 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
if b.Version() == version.Altair || b.Version() == version.Bellatrix || b.Version() == version.EIP4844 {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
if b.Version() == version.Bellatrix || b.Version() == version.EIP4844 {
p, err := b.Body().Execution()
if err != nil {
return err
@@ -62,6 +62,13 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
}
}
if b.Version() == version.EIP4844 {
k, err := b.Body().BlobKzgs()
if err != nil {
return err
}
log = log.WithField("blobKzgCount", len(k))
}
log.Info("Finished applying state transition")
return nil
}
@@ -117,24 +124,12 @@ func logPayload(block interfaces.BeaconBlock) error {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
fields := logrus.Fields{
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
}
log.WithFields(fields).Debug("Synced new payload")
}).Debug("Synced new payload")
return nil
}

View File

@@ -328,7 +328,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
case version.Altair, version.Bellatrix, version.Capella:
case version.Altair, version.Bellatrix, version.EIP4844:
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return err
@@ -338,7 +338,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
return err
}
default:
return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))

View File

@@ -6,17 +6,17 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
fcs := protoarray.New()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
@@ -100,14 +99,6 @@ func WithSlashingPool(p slashings.PoolManager) Option {
}
}
// WithBLSToExecPool to keep track of BLS to Execution address changes.
func WithBLSToExecPool(p blstoexec.PoolManager) Option {
return func(s *Service) error {
s.cfg.BLSToExecPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
return func(s *Service) error {

View File

@@ -22,21 +22,20 @@ import (
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
//
// def validate_merge_block(block: BeaconBlock) -> None:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBeaconBlock) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
@@ -106,11 +105,10 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
//
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
@@ -127,10 +125,9 @@ func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionD
// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
//
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {
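The is_valid_terminal_pow_block predicate quoted above reduces to two comparisons against TERMINAL_TOTAL_DIFFICULTY. A minimal sketch with math/big and made-up difficulty values, rather than Prysm's uint256 handling:

package main

import (
	"fmt"
	"math/big"
)

// isValidTerminalPowBlock mirrors the spec predicate: the block must have
// crossed TERMINAL_TOTAL_DIFFICULTY while its parent had not yet crossed it.
func isValidTerminalPowBlock(blockTD, parentTD, ttd *big.Int) bool {
	reached := blockTD.Cmp(ttd) >= 0
	parentBelow := parentTD.Cmp(ttd) < 0
	return reached && parentBelow
}

func main() {
	ttd := big.NewInt(100)
	fmt.Println(isValidTerminalPowBlock(big.NewInt(105), big.NewInt(99), ttd))  // true: first block past TTD
	fmt.Println(isValidTerminalPowBlock(big.NewInt(105), big.NewInt(101), ttd)) // false: parent already past TTD
	fmt.Println(isValidTerminalPowBlock(big.NewInt(95), big.NewInt(90), ttd))   // false: TTD not reached yet
}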

View File

@@ -10,7 +10,7 @@ import (
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -109,10 +109,10 @@ func Test_validateMergeBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
@@ -159,10 +159,10 @@ func Test_validateMergeBlock(t *testing.T) {
func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)

View File

@@ -19,24 +19,23 @@ import (
// The delay is handled by the caller in `processAttestations`.
//
// Spec pseudocode definition:
// def on_attestation(store: Store, attestation: Attestation) -> None:
// """
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
//
// def on_attestation(store: Store, attestation: Attestation) -> None:
// """
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
// An ``attestation`` that is asserted as invalid may be valid at a later time,
// consider scheduling it for later processing in such case.
// """
// validate_on_attestation(store, attestation)
// store_target_checkpoint_state(store, attestation.data.target)
//
// An ``attestation`` that is asserted as invalid may be valid at a later time,
// consider scheduling it for later processing in such case.
// """
// validate_on_attestation(store, attestation)
// store_target_checkpoint_state(store, attestation.data.target)
// # Get state at the `target` to fully validate attestation
// target_state = store.checkpoint_states[attestation.data.target]
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Get state at the `target` to fully validate attestation
// target_state = store.checkpoint_states[attestation.data.target]
// indexed_attestation = get_indexed_attestation(target_state, attestation)
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
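The pseudocode above ends by updating the store's latest messages for the attesting indices. A small hedged sketch of just that step, using a hypothetical latestMessage map in place of Prysm's vote-tracking structures:

package main

import "fmt"

// latestMessage is an illustrative stand-in for a validator's latest vote.
type latestMessage struct {
	epoch uint64
	root  [32]byte
}

// updateLatestMessages records a newer vote for each attesting index and
// ignores attestations that are not newer than what is already stored.
func updateLatestMessages(store map[uint64]latestMessage, indices []uint64, epoch uint64, root [32]byte) {
	for _, i := range indices {
		if prev, ok := store[i]; !ok || epoch > prev.epoch {
			store[i] = latestMessage{epoch: epoch, root: root}
		}
	}
}

func main() {
	store := map[uint64]latestMessage{}
	updateLatestMessages(store, []uint64{0, 1}, 1, [32]byte{'a'})
	updateLatestMessages(store, []uint64{1}, 0, [32]byte{'b'}) // stale for index 1, ignored
	fmt.Println(store[0].epoch, store[1].epoch)                // 1 1
}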

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
@@ -21,15 +22,14 @@ import (
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -128,6 +128,142 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
}
func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
require.NoError(t, err)
err = s.SetFork(&ethpb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
tests := []struct {
name string
a *ethpb.Attestation
wantedErr string
}{
{
name: "attestation's data slot not aligned with target vote",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
},
{
name: "process nil field (a.Target) in attestation",
a: &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Target: nil,
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
},
AggregationBits: make([]byte, 1),
Signature: make([]byte, 96),
},
wantedErr: "attestation's target can't be nil",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := service.OnAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -135,7 +271,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
@@ -164,7 +300,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -226,7 +362,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -256,7 +392,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
require.NoError(t, err)
require.DeepSSZEqual(t, returned.ToProtoUnsafe(), cached.ToProtoUnsafe())
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
@@ -325,6 +461,37 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -332,7 +499,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)

View File

@@ -17,12 +17,14 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blobs"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
@@ -45,53 +47,52 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// computation in this method and methods it calls into.
//
// Spec pseudocode definition:
// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
//
// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
// # Make a copy of the state to avoid mutability issues
// pre_state = copy(store.block_states[block.parent_root])
// # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
// assert get_current_slot(store) >= block.slot
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// assert block.slot > finalized_slot
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
//
// # Check the block is valid and compute the post-state
// state = pre_state.copy()
// state_transition(state, signed_block, True)
// # Add new block to the store
// store.blocks[hash_tree_root(block)] = block
// # Add new state for this block to the store
// store.block_states[hash_tree_root(block)] = state
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
//
// # Update justified checkpoint
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
// store.best_justified_checkpoint = state.current_justified_checkpoint
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
// store.justified_checkpoint = state.current_justified_checkpoint
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
//
// # Update finalized checkpoint
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
// store.finalized_checkpoint = state.finalized_checkpoint
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Potentially update justified if different from store
// if store.justified_checkpoint != state.current_justified_checkpoint:
// # Update justified if new justified is later than store justified
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// store.justified_checkpoint = state.current_justified_checkpoint
// return
//
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
// # Update justified if store justified is not in chain with finalized checkpoint
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
@@ -143,13 +144,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
hasSideCar, err := blobs.BlockContainsSidecar(signed)
if err != nil {
return err
}
if hasSideCar {
if err := s.cfg.ForkChoiceStore.SetValidData(ctx, blockRoot); err != nil {
return err
}
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
if err := s.handleBlockBLSToExecChanges(signed.Block()); err != nil {
return errors.Wrap(err, "could not handle block's BLSToExecutionChanges")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -282,11 +289,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return nil
}
func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionData, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader interfaces.ExecutionData
var preStateHeader *enginev1.ExecutionPayloadHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
@@ -300,8 +307,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
blockRoots [][32]byte) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock, blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -344,7 +350,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
type versionAndHeader struct {
version int
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -420,6 +426,20 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
tracing.AnnotateError(span, err)
return err
}
hasSideCar, err := blobs.BlockContainsSidecar(b)
if err != nil {
return err
}
if hasSideCar {
if err := s.cfg.ForkChoiceStore.SetValidData(ctx, blockRoots[i]); err != nil {
return err
}
}
if err := s.saveSidecar(ctx, b); err != nil {
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
@@ -437,6 +457,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
@@ -559,22 +580,6 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
return nil
}
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
if blk.Version() < version.Capella {
return nil
}
changes, err := blk.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, change := range changes {
if err := s.cfg.BLSToExecPool.MarkIncluded(change); err != nil {
return errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
}
return nil
}
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
@@ -591,15 +596,36 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.saveSidecar(ctx, b); err != nil {
return errors.Wrapf(err, "could not save sidecar from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
return nil
}
// Saves sidecar to the DB for the compatible block that contains the sidecar.
func (s *Service) saveSidecar(ctx context.Context, b interfaces.SignedBeaconBlock) error {
ok, err := blobs.BlockContainsSidecar(b)
if err != nil {
return errors.Wrap(err, "could not determine if block contains sidecar")
}
if !ok {
return nil
}
sc, err := b.SideCar()
if err != nil {
return errors.Wrap(err, "could not get sidecar")
}
return s.cfg.BeaconDB.SaveBlobsSidecar(ctx, sc.Message)
}
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
@@ -627,7 +653,7 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
@@ -654,7 +680,11 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// Skip validation if the block is not a merge transition block.
// To reach here, the payload must be non-empty. If the state header is empty then it's at transition.
empty, err := consensusblocks.IsEmptyExecutionData(stateHeader)
wh, err := consensusblocks.WrappedExecutionPayloadHeader(stateHeader)
if err != nil {
return err
}
empty, err := consensusblocks.IsEmptyExecutionData(wh)
if err != nil {
return err
}
@@ -685,8 +715,25 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
for {
select {
case ti := <-ticker.C:
if err := s.fillMissingBlockPayloadId(ctx, ti); err != nil {
log.WithError(err).Error("Could not fill missing payload ID")
if !atHalfSlot(ti) {
continue
}
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, s.headRoot())
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).Error("Could not get head block")
} else {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {
log.WithError(err).Error("Could not prepare payload on empty ID")
}
}
missedPayloadIDFilledCount.Inc()
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
@@ -701,31 +748,3 @@ func atHalfSlot(t time.Time) bool {
s := params.BeaconConfig().SecondsPerSlot
return uint64(t.Second())%s == s/2
}
func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) error {
if !atHalfSlot(ti) {
return nil
}
if s.CurrentSlot() == s.cfg.ForkChoiceStore.HighestReceivedBlockSlot() {
return nil
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
missedPayloadIDFilledCount.Inc()
headBlock, err := s.headBlock()
if err != nil {
return err
} else {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {
return err
}
}
}
return nil
}
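The rewritten ticker branch above only acts at the half-slot mark, which atHalfSlot detects with a modulo on the wall-clock second. A worked example assuming 12-second slots (an illustrative mainnet-style value), so the check fires at seconds 6, 18, 30, 42 and 54 of each minute:

package main

import (
	"fmt"
	"time"
)

// atHalfSlot reports whether t falls on the midpoint of a slot, assuming
// secondsPerSlot-second slots and a genesis aligned to the minute.
func atHalfSlot(t time.Time, secondsPerSlot uint64) bool {
	return uint64(t.Second())%secondsPerSlot == secondsPerSlot/2
}

func main() {
	const secondsPerSlot = 12 // illustrative mainnet-style value
	for _, sec := range []int{0, 6, 12, 18, 30} {
		ts := time.Date(2022, 10, 25, 0, 0, sec, 0, time.UTC)
		fmt.Printf("second=%02d atHalfSlot=%v\n", sec, atHalfSlot(ts, secondsPerSlot))
	}
	// Prints false at 0 and 12, true at 6, 18 and 30.
}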

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -162,7 +163,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil && !errors.Is(err, doublylinkedtree.ErrNilNode) {
if err != nil && err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return err
}
if !optimistic {
@@ -171,30 +172,24 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
return err
}
}
go func() {
// We do not pass in the parent context from the method as this method call
// is meant to be asynchronous and run in the background rather than being
// tied to the execution of a block.
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
log.WithError(err).Error("could not migrate to cold")
}
}()
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
return nil
}
// ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
//
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
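The get_ancestor pseudocode above is tail-recursive; the same walk can be written as a loop over a toy block index (hypothetical toyBlock type, not Prysm's store), which makes the skip-slot branch easier to see:

package main

import "fmt"

// toyBlock is an illustrative block record for the index below; it is not
// Prysm's block type.
type toyBlock struct {
	slot       uint64
	parentRoot string
}

// getAncestor walks parent links until it reaches a block at or before slot,
// returning that root; a skipped slot yields the most recent prior root.
func getAncestor(index map[string]toyBlock, root string, slot uint64) string {
	for {
		blk, ok := index[root]
		if !ok || blk.slot <= slot {
			return root
		}
		root = blk.parentRoot
	}
}

func main() {
	index := map[string]toyBlock{
		"a": {slot: 1, parentRoot: "genesis"},
		"b": {slot: 2, parentRoot: "a"},
		"d": {slot: 4, parentRoot: "b"}, // slot 3 was skipped
	}
	fmt.Println(getAncestor(index, "d", 3)) // b: most recent root prior to the skipped slot
	fmt.Println(getAncestor(index, "d", 2)) // b: exact slot match
}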

File diff suppressed because it is too large

View File

@@ -135,6 +135,12 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Return early when there are no fork choice attestations; there is nothing to process and no head to update.
// This covers the condition where the node is still in initial sync toward the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
return nil
}
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
@@ -151,7 +157,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {
start = time.Now()
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
log.WithError(err).Warn("Resolving fork due to new attestation")
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

View File

@@ -226,11 +226,11 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
// Generate attestations for this block in Slot 1
// Generate attestatios for this block in Slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
// Verify the target is in forkchoice
// Verify the target is in forchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
// Insert a new block to forkchoice
@@ -253,52 +253,3 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
}
func TestService_UpdateHead_NoAtts(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
fcs := doublylinkedtree.New()
opts = append(opts,
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied := genesisState.Copy()
// Generate a new block
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)
// Insert a new block to forkchoice
ojc := &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}
b, err := util.GenerateFullBlock(genesisState, pks, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
b.Block.ParentRoot = service.originBlockRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, err, service.UpdateHead(ctx))
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, r, service.head.root) // Validate head is the new one
}

View File

@@ -32,9 +32,9 @@ type SlashingReceiver interface {
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()

View File

@@ -8,7 +8,7 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
@@ -127,14 +127,13 @@ func TestService_ReceiveBlock(t *testing.T) {
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithStateGen(stategen.New(beaconDB)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
@@ -167,14 +166,13 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithForkChoiceStore(protoarray.New()),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx, opts...)
@@ -244,13 +242,12 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fc := doublylinkedtree.New()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithForkChoiceStore(protoarray.New()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)

View File

@@ -23,7 +23,6 @@ import (
f "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
@@ -71,7 +70,6 @@ type config struct {
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier

View File

@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
@@ -83,7 +84,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
@@ -117,8 +118,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
depositCache, err := depositcache.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
stateGen := stategen.New(beaconDB)
// Save a state in stategen for purposes of testing a service stop / shutdown.
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
@@ -129,7 +129,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithForkChoiceStore(doublylinkedtree.New()),
WithAttestationService(attService),
WithStateGen(stateGen),
}
@@ -306,10 +306,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithForkChoiceStore(doublylinkedtree.New()),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
@@ -325,7 +324,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
assert.DeepEqual(t, headBlock, pb, "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -366,10 +365,9 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithForkChoiceStore(doublylinkedtree.New()),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
@@ -380,7 +378,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
pb, err := c.head.block.Proto()
require.NoError(t, err)
@@ -390,9 +388,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: doublylinkedtree.New()},
}
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
@@ -412,6 +409,25 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}
}
func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
}
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -435,7 +451,7 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
@@ -483,6 +499,27 @@ func BenchmarkHasBlockDB(b *testing.B) {
}
}
func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
require.NoError(b, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
require.NoError(b, s.insertBlockToForkchoiceStore(ctx, wsb.Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
@@ -535,10 +572,9 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithForkChoiceStore(doublylinkedtree.New()),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),

View File

@@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -72,7 +72,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := doublylinkedtree.New()
fcs := protoarray.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,

View File

@@ -20,6 +20,13 @@ var (
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
getStatusLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_status_latency_milliseconds",
Help: "Captures RPC latency for get status in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
registerValidatorLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "register_validator_latency_milliseconds",

View File

@@ -72,9 +72,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
// Start initializes the service.
func (s *Service) Start() {
go s.pollRelayerStatus(s.ctx)
}
func (*Service) Start() {}
// Stop halts the service.
func (*Service) Stop() error {
@@ -107,12 +105,19 @@ func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32
// Status retrieves the status of the builder relay network.
func (s *Service) Status() error {
ctx, span := trace.StartSpan(context.Background(), "builder.Status")
defer span.End()
start := time.Now()
defer func() {
getStatusLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
// Return early if builder isn't initialized in service.
if s.c == nil {
return nil
}
return nil
return s.c.Status(ctx)
}
// RegisterValidator registers a validator with the builder relay network.
@@ -152,20 +157,3 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
func (s *Service) Configured() bool {
return s.c != nil && !reflect.ValueOf(s.c).IsNil()
}
func (s *Service) pollRelayerStatus(ctx context.Context) {
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if s.c != nil {
if err := s.c.Status(ctx); err != nil {
log.WithError(err).Error("Failed to call relayer status endpoint, perhaps mev-boost or relayers are down")
}
}
case <-ctx.Done():
return
}
}
}
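With the background pollRelayerStatus loop removed, relay health is now checked on demand through Status(), which also records the new get_status_latency_milliseconds histogram. A hedged sketch of how a caller might gate builder usage on that check; the statusChecker interface and useBuilder helper are illustrative assumptions, not Prysm code.

package example

import "log"

// statusChecker is a minimal stand-in for the builder service's Status method.
type statusChecker interface {
	Status() error
}

// useBuilder reports whether block construction should go through the builder,
// based on an on-demand relay status check instead of a polling loop.
func useBuilder(s statusChecker) bool {
	if err := s.Status(); err != nil {
		log.Printf("builder relay unavailable, falling back to local block construction: %v", err)
		return false
	}
	return true
}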

View File

@@ -33,9 +33,9 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(s.ToProtoUnsafe())
pbState1, err := state_native.ProtobufBeaconStatePhase0(s.InnerStateUnsafe())
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(st.ToProtoUnsafe())
pbstate, err := state_native.ProtobufBeaconStatePhase0(st.InnerStateUnsafe())
require.NoError(t, err)
if !proto.Equal(pbState1, pbstate) {
t.Error("incorrectly cached state")
@@ -50,11 +50,11 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
s, err = cache.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.DeepEqual(t, st2.ToProto(), s.ToProto(), "incorrectly cached state")
assert.DeepEqual(t, st2.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.DeepEqual(t, st.ToProto(), s.ToProto(), "incorrectly cached state")
assert.DeepEqual(t, st.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {

View File

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["merkle_tree.go"],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/cache/depositsnapshot",
visibility = ["//visibility:public"],
)

View File

@@ -1,19 +0,0 @@
package depositsnapshot
const (
DepositContractDepth = 32 // Maximum tree depth as defined by EIP-4881.
)
// MerkleTreeNode is the interface for a Merkle tree.
type MerkleTreeNode interface {
// GetRoot returns the root of the Merkle tree.
GetRoot() [32]byte
// IsFull returns whether the node is full, i.e. there is no space left for deposits.
IsFull() bool
// Finalize marks deposits of the Merkle tree as finalized.
Finalize(deposits uint, depth uint) MerkleTreeNode
// GetFinalized returns a list of hashes of all the finalized nodes and the number of deposits.
GetFinalized(result [][32]byte) ([][32]byte, uint)
// PushLeaf adds a new leaf node at the next available Zero node.
PushLeaf(leaf [32]byte, deposits uint, depth uint) MerkleTreeNode
}

View File

@@ -33,5 +33,5 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
res, err := c.Get(ctx, r)
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
assert.DeepEqual(t, res.CloneInnerState(), s.CloneInnerState(), "Expected equal protos to return from cache")
}

View File

@@ -53,7 +53,7 @@ func (c *SyncCommitteeHeadStateCache) Get(slot types.Slot) (state.BeaconState, e
return nil, ErrIncorrectType
}
switch st.Version() {
case version.Altair, version.Bellatrix:
case version.Altair, version.Bellatrix, version.EIP4844:
default:
return nil, ErrIncorrectType
}

View File

@@ -16,7 +16,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//testing/endtoend/evaluators:__subpackages__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
@@ -59,7 +58,6 @@ go_test(
"deposit_test.go",
"epoch_precompute_test.go",
"epoch_spec_test.go",
"exports_test.go",
"reward_test.go",
"sync_committee_test.go",
"transition_test.go",

View File

@@ -82,24 +82,23 @@ func ProcessAttestationNoVerifySignature(
// the proposer in state.
//
// Spec code:
// # Update epoch participation flags
// if data.target.epoch == get_current_epoch(state):
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
//
// # Update epoch participation flags
// if data.target.epoch == get_current_epoch(state):
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
//
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
//
// # Reward proposer
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// # Reward proposer
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
func SetParticipationAndRewardProposer(
ctx context.Context,
beaconState state.BeaconState,
@@ -158,13 +157,12 @@ func AddValidatorFlag(flag, flagPosition uint8) (uint8, error) {
// EpochParticipation sets and returns the proposer reward numerator and epoch participation.
//
// Spec code:
//
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochParticipation []byte, participatedFlags map[uint8]bool, totalBalance uint64) (uint64, []byte, error) {
cfg := params.BeaconConfig()
sourceFlagIndex := cfg.TimelySourceFlagIndex
@@ -220,10 +218,9 @@ func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochPa
// RewardProposer rewards the proposer by increasing the proposer's balance using the input reward numerator and the calculated reward denominator.
//
// Spec code:
//
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
func RewardProposer(ctx context.Context, beaconState state.BeaconState, proposerRewardNumerator uint64) error {
cfg := params.BeaconConfig()
d := (cfg.WeightDenominator - cfg.ProposerWeight) * cfg.WeightDenominator / cfg.ProposerWeight
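As a worked example of the denominator computed on the line above, assuming the mainnet Altair weights WEIGHT_DENOMINATOR = 64 and PROPOSER_WEIGHT = 8 (values not stated in this diff):

const (
	weightDenominator = 64
	proposerWeight    = 8
	// proposer_reward_denominator = (64 - 8) * 64 / 8 = 448
	proposerRewardDenominator = (weightDenominator - proposerWeight) * weightDenominator / proposerWeight
)

// With a proposer_reward_numerator of, say, 1,000,000 the proposer is credited
// 1,000,000 / 448 = 2232 Gwei (integer division), per the spec formula.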
@@ -241,32 +238,31 @@ func RewardProposer(ctx context.Context, beaconState state.BeaconState, proposer
//
// Spec code:
// def get_attestation_participation_flag_indices(state: BeaconState,
// data: AttestationData,
// inclusion_delay: uint64) -> Sequence[int]:
// """
// Return the flag indices that are satisfied by an attestation.
// """
// if data.target.epoch == get_current_epoch(state):
// justified_checkpoint = state.current_justified_checkpoint
// else:
// justified_checkpoint = state.previous_justified_checkpoint
//
// data: AttestationData,
// inclusion_delay: uint64) -> Sequence[int]:
// """
// Return the flag indices that are satisfied by an attestation.
// """
// if data.target.epoch == get_current_epoch(state):
// justified_checkpoint = state.current_justified_checkpoint
// else:
// justified_checkpoint = state.previous_justified_checkpoint
// # Matching roots
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
// assert is_matching_source
//
// # Matching roots
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
// assert is_matching_source
// participation_flag_indices = []
// if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
// participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
// if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
// participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
// if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
// participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
//
// participation_flag_indices = []
// if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
// participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
// if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
// participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
// if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
// participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
//
// return participation_flag_indices
// return participation_flag_indices
func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *ethpb.AttestationData, delay types.Slot) (map[uint8]bool, error) {
currEpoch := time.CurrentEpoch(beaconState)
var justifiedCheckpt *ethpb.Checkpoint
@@ -308,10 +304,9 @@ func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *et
// MatchingStatus returns the matching statuses for the attestation data's source, target and head.
//
// Spec code:
//
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
// is_matching_source = data.source == justified_checkpoint
// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
func MatchingStatus(beaconState state.BeaconState, data *ethpb.AttestationData, cp *ethpb.Checkpoint) (matchedSrc, matchedTgt, matchedHead bool, err error) {
matchedSrc = attestation.CheckPointIsEqual(data.Source, cp)

View File

@@ -9,6 +9,7 @@ import (
p2pType "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -19,33 +20,32 @@ import (
//
// Spec code:
// def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None:
// # Verify sync committee aggregate signature signing over the previous slot block root
// committee_pubkeys = state.current_sync_committee.pubkeys
// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
// previous_slot = max(state.slot, Slot(1)) - Slot(1)
// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
//
// # Verify sync committee aggregate signature signing over the previous slot block root
// committee_pubkeys = state.current_sync_committee.pubkeys
// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
// previous_slot = max(state.slot, Slot(1)) - Slot(1)
// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
// # Compute participant and proposer rewards
// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
//
// # Compute participant and proposer rewards
// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
//
// # Apply participant and proposer rewards
// all_pubkeys = [v.pubkey for v in state.validators]
// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
// if participation_bit:
// increase_balance(state, participant_index, participant_reward)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
// # Apply participant and proposer rewards
// all_pubkeys = [v.pubkey for v in state.validators]
// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
// if participation_bit:
// increase_balance(state, participant_index, participant_reward)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
s, votedKeys, err := processSyncAggregate(ctx, s, sync)
votedKeys, votedIndices, didntVoteIndices, err := FilterSyncCommitteeVotes(s, sync)
if err != nil {
return nil, errors.Wrap(err, "could not filter sync committee votes")
}
@@ -53,70 +53,50 @@ func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, errors.Wrap(err, "could not verify sync committee signature")
}
return s, nil
return ApplySyncRewardsPenalties(ctx, s, votedIndices, didntVoteIndices)
}
// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state and the list of validators'
// public keys that voted, for future signature verification.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
state.BeaconState,
[]bls.PublicKey,
error) {
// FilterSyncCommitteeVotes returns the public keys and indices of sync committee members that voted, along with the indices of those that did not.
func FilterSyncCommitteeVotes(s state.BeaconState, sync *ethpb.SyncAggregate) (
votedKeys []bls.PublicKey,
votedIndices []types.ValidatorIndex,
didntVoteIndices []types.ValidatorIndex,
err error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if currentSyncCommittee == nil {
return nil, nil, errors.New("nil current sync committee in state")
return nil, nil, nil, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, errors.New("bits length exceeds committee length")
return nil, nil, nil, errors.New("bits length exceeds committee length")
}
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
votedKeys = make([]bls.PublicKey, 0, len(committeeKeys))
votedIndices = make([]types.ValidatorIndex, 0, len(committeeKeys))
didntVoteIndices = make([]types.ValidatorIndex, 0) // No allocation. Expect most votes.
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, nil, err
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, nil, err
}
earnedProposerReward := uint64(0)
for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, errors.New("validator public key does not exist in state")
return nil, nil, nil, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
votedKeys = append(votedKeys, pubKey)
if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
earnedProposerReward += proposerReward
votedIndices = append(votedIndices, vIdx)
} else {
if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
didntVoteIndices = append(didntVoteIndices, vIdx)
}
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, nil, err
}
return s, votedKeys, err
return
}
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
@@ -145,6 +125,43 @@ func VerifySyncCommitteeSig(s state.BeaconState, syncKeys []bls.PublicKey, syncS
return nil
}
// ApplySyncRewardsPenalties applies rewards and penalties for proposer and sync committee participants.
func ApplySyncRewardsPenalties(ctx context.Context, s state.BeaconState, votedIndices, didntVoteIndices []types.ValidatorIndex) (state.BeaconState, error) {
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, err
}
// Apply sync committee rewards.
earnedProposerReward := uint64(0)
for _, index := range votedIndices {
if err := helpers.IncreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
earnedProposerReward += proposerReward
}
// Apply proposer rewards.
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, err
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, err
}
// Apply sync committee penalties.
for _, index := range didntVoteIndices {
if err := helpers.DecreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
}
return s, nil
}
// SyncRewards returns the proposer reward and the sync participant reward given the total active balance in state.
func SyncRewards(activeBalance uint64) (proposerReward, participantReward uint64, err error) {
cfg := params.BeaconConfig()

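To make the reward arithmetic in the process_sync_aggregate spec comment concrete, here is a minimal sketch that mirrors the spec formulas with assumed mainnet constants (EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei, BASE_REWARD_FACTOR = 64, SYNC_REWARD_WEIGHT = 2, WEIGHT_DENOMINATOR = 64, PROPOSER_WEIGHT = 8, SLOTS_PER_EPOCH = 32, SYNC_COMMITTEE_SIZE = 512). It is not the Prysm SyncRewards implementation, and it approximates integer_squareroot with math.Sqrt for brevity.

package example

import "math"

// syncRewards mirrors the spec arithmetic for the per-slot sync committee
// participant reward and the per-participant proposer reward.
func syncRewards(totalActiveBalanceGwei uint64) (proposerReward, participantReward uint64) {
	const (
		effectiveBalanceIncrement = 1_000_000_000 // Gwei
		baseRewardFactor          = 64
		syncRewardWeight          = 2
		weightDenominator         = 64
		proposerWeight            = 8
		slotsPerEpoch             = 32
		syncCommitteeSize         = 512
	)
	// get_base_reward_per_increment, with the integer square root approximated.
	baseRewardPerIncrement := effectiveBalanceIncrement * baseRewardFactor / uint64(math.Sqrt(float64(totalActiveBalanceGwei)))
	totalActiveIncrements := totalActiveBalanceGwei / effectiveBalanceIncrement
	totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch
	participantReward = maxParticipantRewards / syncCommitteeSize
	proposerReward = participantReward * proposerWeight / (weightDenominator - proposerWeight)
	return proposerReward, participantReward
}

For the Altair test state used above (MAX_VALIDATORS_PER_COMMITTEE = 2048 validators at 32 ETH each, roughly 65,536 ETH active), this arithmetic works out to a participant reward of 988 Gwei and a proposer reward of 141 Gwei per participating bit, which is consistent with the 32000000988 and 32000000282 balance assertions in the tests below.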
View File

@@ -168,36 +168,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
require.NoError(t, err)
}
// This is a regression test #11696
func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
committeeKeys := committee.Pubkeys
committeeKeys[1] = committeeKeys[0]
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
idx, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[0]))
require.Equal(t, true, ok)
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xFF
}
syncBits.SetBitAt(0, false)
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
}
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
require.Equal(t, 511, len(votedKeys))
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
balances := st.Balances()
require.Equal(t, uint64(988), balances[idx])
}
func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
func TestProcessSyncCommittee_FilterSyncCommitteeVotes(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
@@ -212,40 +183,25 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
SyncCommitteeBits: syncBits,
}
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
votedKeys, votedIndices, didntVoteIndices, err := altair.FilterSyncCommitteeVotes(beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
for _, key := range votedKeys {
votedMap[bytesutil.ToBytes48(key.Marshal())] = true
}
require.Equal(t, int(syncBits.Len()/2), len(votedKeys))
currentSyncCommittee, err := st.CurrentSyncCommittee()
require.NoError(t, err)
committeeKeys := currentSyncCommittee.Pubkeys
balances := st.Balances()
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, int(syncBits.Len()/2), len(votedIndices))
require.Equal(t, int(syncBits.Len()/2), len(didntVoteIndices))
for i := 0; i < len(syncBits); i++ {
if syncBits.BitAt(uint64(i)) {
pk := bytesutil.ToBytes48(committeeKeys[i])
pk := beaconState.PubkeyAtIndex(votedIndices[i])
require.DeepEqual(t, true, votedMap[pk])
idx, ok := st.ValidatorIndexByPubkey(pk)
require.Equal(t, true, ok)
require.Equal(t, uint64(32000000988), balances[idx])
} else {
pk := bytesutil.ToBytes48(committeeKeys[i])
pk := beaconState.PubkeyAtIndex(didntVoteIndices[i])
require.DeepEqual(t, false, votedMap[pk])
idx, ok := st.ValidatorIndexByPubkey(pk)
require.Equal(t, true, ok)
if idx != proposerIndex {
require.Equal(t, uint64(31999999012), balances[idx])
}
}
}
require.Equal(t, uint64(32000035108), balances[proposerIndex])
}
func Test_VerifySyncCommitteeSig(t *testing.T) {
@@ -284,6 +240,22 @@ func Test_VerifySyncCommitteeSig(t *testing.T) {
require.NoError(t, altair.VerifySyncCommitteeSig(beaconState, pks, aggregatedSig))
}
func Test_ApplySyncRewardsPenalties(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
beaconState, err := altair.ApplySyncRewardsPenalties(context.Background(), beaconState,
[]types.ValidatorIndex{0, 1}, // voted
[]types.ValidatorIndex{2, 3}) // didn't vote
require.NoError(t, err)
balances := beaconState.Balances()
require.Equal(t, uint64(32000000988), balances[0])
require.Equal(t, balances[0], balances[1])
require.Equal(t, uint64(31999999012), balances[2])
require.Equal(t, balances[2], balances[3])
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, uint64(32000000282), balances[proposerIndex])
}
func Test_SyncRewards(t *testing.T) {
tests := []struct {
name string

View File

@@ -68,10 +68,9 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
// For fully inactive validators and perfectly active validators, the effect is the same as before Altair.
// For a validator that is inactive while the chain fails to finalize, the inactivity score increases by a fixed amount each epoch, so the total loss after N epochs is proportional to N**2/2.
// For imperfectly active validators, the inactivity score's behavior is specified by this function:
//
// If a validator fails to submit an attestation with the correct target, their inactivity score goes up by 4.
// If they successfully submit an attestation with the correct source and target, their inactivity score drops by 1
// If the chain has recently finalized, each validator's score drops by 16.
// If a validator fails to submit an attestation with the correct target, their inactivity score goes up by 4.
// If they successfully submit an attestation with the correct source and target, their inactivity score drops by 1
// If the chain has recently finalized, each validator's score drops by 16.
func ProcessInactivityScores(
ctx context.Context,
beaconState state.BeaconState,
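The per-validator rule described in the comment above can be sketched in a few lines; this is a simplified illustration assuming the mainnet parameters INACTIVITY_SCORE_BIAS = 4 and INACTIVITY_SCORE_RECOVERY_RATE = 16, not the Prysm implementation.

package example

// updateInactivityScore applies the spec's per-validator inactivity rule:
// +4 when the timely-target vote is missed, otherwise -1, plus an extra
// -16 (floored at zero) whenever the chain is not in an inactivity leak.
func updateInactivityScore(score uint64, timelyTarget, inInactivityLeak bool) uint64 {
	const (
		inactivityScoreBias         = 4
		inactivityScoreRecoveryRate = 16
	)
	if timelyTarget {
		if score > 0 {
			score--
		}
	} else {
		score += inactivityScoreBias
	}
	if !inInactivityLeak {
		if score < inactivityScoreRecoveryRate {
			score = 0
		} else {
			score -= inactivityScoreRecoveryRate
		}
	}
	return score
}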
@@ -133,13 +132,12 @@ func ProcessInactivityScores(
// it also tracks and updates epoch attesting balances.
// Spec code:
// if epoch == get_current_epoch(state):
//
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
// active_validator_indices = get_active_validator_indices(state, epoch)
// participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
// return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
// active_validator_indices = get_active_validator_indices(state, epoch)
// participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
// return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
func ProcessEpochParticipation(
ctx context.Context,
beaconState state.BeaconState,

View File

@@ -14,11 +14,10 @@ import (
//
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
//
// next_epoch = get_current_epoch(state) + Epoch(1)
// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
// state.current_sync_committee = state.next_sync_committee
// state.next_sync_committee = get_next_sync_committee(state)
// next_epoch = get_current_epoch(state) + Epoch(1)
// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
// state.current_sync_committee = state.next_sync_committee
// state.next_sync_committee = get_next_sync_committee(state)
func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
nextEpoch := time.NextEpoch(beaconState)
if nextEpoch%params.BeaconConfig().EpochsPerSyncCommitteePeriod == 0 {
@@ -47,9 +46,8 @@ func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconSt
//
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
//
// state.previous_epoch_participation = state.current_epoch_participation
// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
// state.previous_epoch_participation = state.current_epoch_participation
// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
func ProcessParticipationFlagUpdates(beaconState state.BeaconState) (state.BeaconState, error) {
c, err := beaconState.CurrentEpochParticipation()
if err != nil {

View File

@@ -1,3 +0,0 @@
package altair
var ProcessSyncAggregateEported = processSyncAggregate

View File

@@ -13,17 +13,16 @@ import (
// individual validator's base reward.
//
// Spec code:
// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
// """
// Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
//
// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
// """
// Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
//
// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
// and sync committees).
// """
// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
// return Gwei(increments * get_base_reward_per_increment(state))
// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
// and sync committees).
// """
// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
// return Gwei(increments * get_base_reward_per_increment(state))
func BaseReward(s state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) {
totalBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
@@ -51,8 +50,7 @@ func BaseRewardWithTotalBalance(s state.ReadOnlyBeaconState, index types.Validat
//
// Spec code:
// def get_base_reward_per_increment(state: BeaconState) -> Gwei:
//
// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
func BaseRewardPerIncrement(activeBalance uint64) (uint64, error) {
if activeBalance == 0 {
return 0, errors.New("active balance can't be 0")

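A quick worked example of get_base_reward_per_increment, assuming the mainnet constants EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei and BASE_REWARD_FACTOR = 64 together with a hypothetical 10,000,000 ETH of total active balance:

// Illustrative numbers only; constants and balance are assumptions, not part of this diff.
const (
	effectiveBalanceIncrement = 1_000_000_000 // Gwei (1 ETH)
	baseRewardFactor          = 64
	activeBalanceSquareRoot   = 100_000_000 // integer_squareroot of 10^16 Gwei (10,000,000 ETH)
	// base_reward_per_increment = 10^9 * 64 / 10^8 = 640 Gwei
	baseRewardPerIncrement = effectiveBalanceIncrement * baseRewardFactor / activeBalanceSquareRoot
	// A 32 ETH validator has 32 increments, so its base reward is 32 * 640 = 20,480 Gwei per epoch.
	baseReward = 32 * baseRewardPerIncrement
)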
View File

@@ -47,14 +47,13 @@ func ValidateNilSyncContribution(s *ethpb.SignedContributionAndProof) error {
//
// Spec code:
// def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
//
// """
// Return the next sync committee, with possible pubkey duplicates.
// """
// indices = get_next_sync_committee_indices(state)
// pubkeys = [state.validators[index].pubkey for index in indices]
// aggregate_pubkey = bls.AggregatePKs(pubkeys)
// return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
// """
// Return the next sync committee, with possible pubkey duplicates.
// """
// indices = get_next_sync_committee_indices(state)
// pubkeys = [state.validators[index].pubkey for index in indices]
// aggregate_pubkey = bls.AggregatePKs(pubkeys)
// return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCommittee, error) {
indices, err := NextSyncCommitteeIndices(ctx, s)
if err != nil {
@@ -79,27 +78,26 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom
//
// Spec code:
// def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
// """
// Return the sync committee indices, with possible duplicates, for the next sync committee.
// """
// epoch = Epoch(get_current_epoch(state) + 1)
//
// """
// Return the sync committee indices, with possible duplicates, for the next sync committee.
// """
// epoch = Epoch(get_current_epoch(state) + 1)
//
// MAX_RANDOM_BYTE = 2**8 - 1
// active_validator_indices = get_active_validator_indices(state, epoch)
// active_validator_count = uint64(len(active_validator_indices))
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
// i = 0
// sync_committee_indices: List[ValidatorIndex] = []
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
// candidate_index = active_validator_indices[shuffled_index]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
// sync_committee_indices.append(candidate_index)
// i += 1
// return sync_committee_indices
// MAX_RANDOM_BYTE = 2**8 - 1
// active_validator_indices = get_active_validator_indices(state, epoch)
// active_validator_count = uint64(len(active_validator_indices))
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
// i = 0
// sync_committee_indices: List[ValidatorIndex] = []
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
// candidate_index = active_validator_indices[shuffled_index]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
// sync_committee_indices.append(candidate_index)
// i += 1
// return sync_committee_indices
func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]types.ValidatorIndex, error) {
epoch := coreTime.NextEpoch(s)
indices, err := helpers.ActiveValidatorIndices(ctx, s, epoch)
@@ -146,19 +144,18 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]types
// SyncSubCommitteePubkeys returns the pubkeys participating in a sync subcommittee.
//
// def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
// # Committees assigned to `slot` sign for `slot - 1`
// # This creates the exceptional logic below when transitioning between sync committee periods
// next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
// if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
// sync_committee = state.current_sync_committee
// else:
// sync_committee = state.next_sync_committee
//
// # Committees assigned to `slot` sign for `slot - 1`
// # This creates the exceptional logic below when transitioning between sync committee periods
// next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
// if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
// sync_committee = state.current_sync_committee
// else:
// sync_committee = state.next_sync_committee
//
// # Return pubkeys for the subcommittee index
// sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
// i = subcommittee_index * sync_subcommittee_size
// return sync_committee.pubkeys[i:i + sync_subcommittee_size]
// # Return pubkeys for the subcommittee index
// sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
// i = subcommittee_index * sync_subcommittee_size
// return sync_committee.pubkeys[i:i + sync_subcommittee_size]
func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types.CommitteeIndex) ([][]byte, error) {
cfg := params.BeaconConfig()
subCommSize := cfg.SyncCommitteeSize / cfg.SyncCommitteeSubnetCount
@@ -175,9 +172,8 @@ func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types
// aggregator.
//
// def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
//
// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
if len(sig) != fieldparams.BLSSignatureLength {
return false, errors.New("incorrect sig length")

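For intuition on the aggregator modulo above, with the mainnet values SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4 and TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16 (assumed here, not stated in this diff):

const (
	syncCommitteeSize                    = 512
	syncCommitteeSubnetCount             = 4
	targetAggregatorsPerSyncSubcommittee = 16
	// modulo = max(1, 512 / 4 / 16) = 8
	aggregatorModulo = syncCommitteeSize / syncCommitteeSubnetCount / targetAggregatorsPerSyncSubcommittee
)

// A signature selects its owner as an aggregator when the first 8 bytes of its
// hash, read as a uint64, are divisible by 8, i.e. roughly 1 in 8 members of a
// 128-member subcommittee, for about 16 expected aggregators per subnet.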
View File

@@ -15,19 +15,18 @@ import (
//
// Spec code:
// def process_epoch(state: BeaconState) -> None:
//
// process_justification_and_finalization(state) # [Modified in Altair]
// process_inactivity_updates(state) # [New in Altair]
// process_rewards_and_penalties(state) # [Modified in Altair]
// process_registry_updates(state)
// process_slashings(state) # [Modified in Altair]
// process_eth1_data_reset(state)
// process_effective_balance_updates(state)
// process_slashings_reset(state)
// process_randao_mixes_reset(state)
// process_historical_roots_update(state)
// process_participation_flag_updates(state) # [New in Altair]
// process_sync_committee_updates(state) # [New in Altair]
// process_justification_and_finalization(state) # [Modified in Altair]
// process_inactivity_updates(state) # [New in Altair]
// process_rewards_and_penalties(state) # [Modified in Altair]
// process_registry_updates(state)
// process_slashings(state) # [Modified in Altair]
// process_eth1_data_reset(state)
// process_effective_balance_updates(state)
// process_slashings_reset(state)
// process_randao_mixes_reset(state)
// process_historical_roots_update(state)
// process_participation_flag_updates(state) # [New in Altair]
// process_sync_committee_updates(state) # [New in Altair]
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
defer span.End()

View File

@@ -16,53 +16,52 @@ import (
//
// Spec code:
// def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
// epoch = phase0.get_current_epoch(pre)
// post = BeaconState(
// # Versioning
// genesis_time=pre.genesis_time,
// genesis_validators_root=pre.genesis_validators_root,
// slot=pre.slot,
// fork=Fork(
// previous_version=pre.fork.current_version,
// current_version=ALTAIR_FORK_VERSION,
// epoch=epoch,
// ),
// # History
// latest_block_header=pre.latest_block_header,
// block_roots=pre.block_roots,
// state_roots=pre.state_roots,
// historical_roots=pre.historical_roots,
// # Eth1
// eth1_data=pre.eth1_data,
// eth1_data_votes=pre.eth1_data_votes,
// eth1_deposit_index=pre.eth1_deposit_index,
// # Registry
// validators=pre.validators,
// balances=pre.balances,
// # Randomness
// randao_mixes=pre.randao_mixes,
// # Slashings
// slashings=pre.slashings,
// # Participation
// previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
// current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
// # Finality
// justification_bits=pre.justification_bits,
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
// current_justified_checkpoint=pre.current_justified_checkpoint,
// finalized_checkpoint=pre.finalized_checkpoint,
// # Inactivity
// inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
// )
// # Fill in previous epoch participation from the pre state's pending attestations
// translate_participation(post, pre.previous_epoch_attestations)
//
// epoch = phase0.get_current_epoch(pre)
// post = BeaconState(
// # Versioning
// genesis_time=pre.genesis_time,
// genesis_validators_root=pre.genesis_validators_root,
// slot=pre.slot,
// fork=Fork(
// previous_version=pre.fork.current_version,
// current_version=ALTAIR_FORK_VERSION,
// epoch=epoch,
// ),
// # History
// latest_block_header=pre.latest_block_header,
// block_roots=pre.block_roots,
// state_roots=pre.state_roots,
// historical_roots=pre.historical_roots,
// # Eth1
// eth1_data=pre.eth1_data,
// eth1_data_votes=pre.eth1_data_votes,
// eth1_deposit_index=pre.eth1_deposit_index,
// # Registry
// validators=pre.validators,
// balances=pre.balances,
// # Randomness
// randao_mixes=pre.randao_mixes,
// # Slashings
// slashings=pre.slashings,
// # Participation
// previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
// current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
// # Finality
// justification_bits=pre.justification_bits,
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
// current_justified_checkpoint=pre.current_justified_checkpoint,
// finalized_checkpoint=pre.finalized_checkpoint,
// # Inactivity
// inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
// )
// # Fill in previous epoch participation from the pre state's pending attestations
// translate_participation(post, pre.previous_epoch_attestations)
//
// # Fill in sync committees
// # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
// post.current_sync_committee = get_next_sync_committee(post)
// post.next_sync_committee = get_next_sync_committee(post)
// return post
// # Fill in sync committees
// # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
// post.current_sync_committee = get_next_sync_committee(post)
// post.next_sync_committee = get_next_sync_committee(post)
// return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
@@ -127,18 +126,17 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
//
// Spec code:
// def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None:
// for attestation in pending_attestations:
// data = attestation.data
// inclusion_delay = attestation.inclusion_delay
// # Translate attestation inclusion info to flag indices
// participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay)
//
// for attestation in pending_attestations:
// data = attestation.data
// inclusion_delay = attestation.inclusion_delay
// # Translate attestation inclusion info to flag indices
// participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay)
//
// # Apply flags to all attesting validators
// epoch_participation = state.previous_epoch_participation
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index in participation_flag_indices:
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// # Apply flags to all attesting validators
// epoch_participation = state.previous_epoch_participation
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index in participation_flag_indices:
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
func TranslateParticipation(ctx context.Context, state state.BeaconState, atts []*ethpb.PendingAttestation) (state.BeaconState, error) {
epochParticipation, err := state.PreviousEpochParticipation()
if err != nil {

View File

@@ -41,8 +41,8 @@ go_library(
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -70,7 +70,6 @@ go_test(
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"exports_test.go",
"genesis_test.go",
"header_test.go",
"payload_test.go",
@@ -94,12 +93,10 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -185,20 +185,19 @@ func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyB
// VerifyIndexedAttestation determines the validity of an indexed attestation.
//
// Spec pseudocode definition:
//
// def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
// """
// Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature.
// """
// # Verify indices are sorted and unique
// indices = indexed_attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(set(indices)):
// return False
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
// signing_root = compute_signing_root(indexed_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
defer span.End()

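The core non-signature check above is that the attesting indices are non-empty, sorted, and unique. A standalone sketch of that check follows; the helper name is hypothetical, and Prysm's real implementation additionally verifies the aggregate BLS signature.

```go
package main

import "fmt"

// indicesSortedAndUnique mirrors the spec check
// `len(indices) != 0 and indices == sorted(set(indices))`
// for an indexed attestation's attesting indices.
func indicesSortedAndUnique(indices []uint64) bool {
    if len(indices) == 0 {
        return false
    }
    for i := 1; i < len(indices); i++ {
        // Not strictly increasing means unsorted or duplicate.
        if indices[i] <= indices[i-1] {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(indicesSortedAndUnique([]uint64{1, 3, 7})) // true
    fmt.Println(indicesSortedAndUnique([]uint64{3, 1, 7})) // false: unsorted
    fmt.Println(indicesSortedAndUnique([]uint64{1, 1, 7})) // false: duplicate
    fmt.Println(indicesSortedAndUnique(nil))               // false: empty
}
```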

@@ -22,21 +22,20 @@ import (
// Casper FFG slashing conditions if any slashable events occurred.
//
// Spec pseudocode definition:
// def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
// attestation_1 = attester_slashing.attestation_1
// attestation_2 = attester_slashing.attestation_2
// assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
// assert is_valid_indexed_attestation(state, attestation_1)
// assert is_valid_indexed_attestation(state, attestation_2)
//
// slashed_any = False
// indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
// for index in sorted(indices):
// if is_slashable_validator(state.validators[index], get_current_epoch(state)):
// slash_validator(state, index)
// slashed_any = True
// assert slashed_any
func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
@@ -84,7 +83,7 @@ func ProcessAttesterSlashing(
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() == version.Bellatrix, beaconState.Version() == version.Capella:
case beaconState.Version() == version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
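The hunk above selects the minimum slashing penalty quotient by state version. Below is a simplified sketch of that fork-dependent selection, using the mainnet spec values (128 for Phase 0, 64 for Altair, 32 for Bellatrix) and hypothetical constants in place of Prysm's params and version packages.

```go
package main

import (
    "errors"
    "fmt"
)

// Hypothetical fork version constants; the real ones live in
// Prysm's runtime/version package.
const (
    phase0 = iota
    altair
    bellatrix
)

// minSlashingPenaltyQuotient returns the penalty quotient for the
// given fork, falling back to an error for unknown versions.
func minSlashingPenaltyQuotient(stateVersion int) (uint64, error) {
    switch stateVersion {
    case phase0:
        return 128, nil
    case altair:
        return 64, nil
    case bellatrix:
        return 32, nil
    default:
        return 0, errors.New("unknown state version")
    }
}

func main() {
    q, err := minSlashingPenaltyQuotient(bellatrix)
    fmt.Println(q, err) // 32 <nil>
}
```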
@@ -133,17 +132,16 @@ func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaco
// IsSlashableAttestationData verifies a slashing against the Casper Proof of Stake FFG rules.
//
// Spec pseudocode definition:
//
// def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
// """
// Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
// """
// return (
// # Double vote
// (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or
// # Surround vote
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
// )
func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
return false

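A self-contained illustration of the double-vote and surround-vote conditions above; the attestationData struct is a pared-down stand-in for the real AttestationData.

```go
package main

import "fmt"

// checkpoint is a minimal stand-in for the spec's Checkpoint (epoch only).
type checkpoint struct{ epoch uint64 }

// attestationData carries just the fields the slashing check needs;
// the real AttestationData has more.
type attestationData struct {
    source, target checkpoint
    root           string // proxy for "data_1 != data_2" beyond the epochs
}

// isSlashable mirrors is_slashable_attestation_data: a double vote
// (different data, same target epoch) or a surround vote.
func isSlashable(d1, d2 attestationData) bool {
    doubleVote := d1 != d2 && d1.target.epoch == d2.target.epoch
    surroundVote := d1.source.epoch < d2.source.epoch && d2.target.epoch < d1.target.epoch
    return doubleVote || surroundVote
}

func main() {
    a := attestationData{source: checkpoint{1}, target: checkpoint{2}, root: "a"}
    b := attestationData{source: checkpoint{1}, target: checkpoint{2}, root: "b"}
    fmt.Println(isSlashable(a, b)) // true: double vote

    outer := attestationData{source: checkpoint{0}, target: checkpoint{5}}
    inner := attestationData{source: checkpoint{2}, target: checkpoint{3}}
    fmt.Println(isSlashable(outer, inner)) // true: surround vote
}
```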

@@ -302,72 +302,3 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
require.Equal(t, uint64(31000000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateCapella(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, beaconState.SetSlot(currentSlot))
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, b.Block.Body.AttesterSlashings, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
`
Expected validator at index 1's exit epoch to match
%d, received %d instead
`,
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
require.Equal(t, uint64(31000000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}


@@ -71,9 +71,8 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
// into the beacon chain.
//
// Spec pseudocode definition:
//
// For each deposit in block.body.deposits:
// process_deposit(state, deposit)
func ProcessDeposits(
ctx context.Context,
beaconState state.BeaconState,
@@ -121,41 +120,40 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi
//
// Spec pseudocode definition:
// def process_deposit(state: BeaconState, deposit: Deposit) -> None:
// # Verify the Merkle branch
// assert is_valid_merkle_branch(
// leaf=hash_tree_root(deposit.data),
// branch=deposit.proof,
// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
// index=state.eth1_deposit_index,
// root=state.eth1_data.deposit_root,
// )
//
// # Deposits must be processed in order
// state.eth1_deposit_index += 1
//
// pubkey = deposit.data.pubkey
// amount = deposit.data.amount
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// deposit_message = DepositMessage(
// pubkey=deposit.data.pubkey,
// withdrawal_credentials=deposit.data.withdrawal_credentials,
// amount=deposit.data.amount,
// )
// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
// signing_root = compute_signing_root(deposit_message, domain)
// if not bls.Verify(pubkey, signing_root, deposit.data.signature):
// return
//
// # Add validator and balance entries
// state.validators.append(get_validator_from_deposit(state, deposit))
// state.balances.append(amount)
// else:
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, bool, error) {
var newValidator bool
if err := verifyDeposit(beaconState, deposit); err != nil {

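The tail of the pseudocode above branches between registering a new validator and topping up an existing balance. Here is a minimal sketch of that branch, assuming the Merkle proof and (for new keys) the deposit signature have already been verified; the types and names are illustrative, not Prysm's.

```go
package main

import "fmt"

// registry is a minimal sketch: parallel validator pubkeys and balances,
// mirroring the spec's "append new validator or increase balance" branch.
type registry struct {
    pubkeys  []string
    balances []uint64
}

// applyDeposit adds a new validator for an unknown pubkey, or tops up
// the balance of an existing one.
func (r *registry) applyDeposit(pubkey string, amount uint64) {
    for i, pk := range r.pubkeys {
        if pk == pubkey {
            // Existing validator: increase its balance.
            r.balances[i] += amount
            return
        }
    }
    // Unknown pubkey: add validator and balance entries.
    r.pubkeys = append(r.pubkeys, pubkey)
    r.balances = append(r.balances, amount)
}

func main() {
    r := &registry{}
    r.applyDeposit("0xaa", 32_000_000_000) // new validator
    r.applyDeposit("0xaa", 1_000_000_000)  // top-up
    fmt.Println(r.balances)                // [33000000000]
}
```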

@@ -6,8 +6,3 @@ var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange m
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
var errInvalidWithdrawalIndex = errors.New("invalid withdrawal index")
var errInvalidValidatorIndex = errors.New("invalid validator index")
var errInvalidWithdrawalAmount = errors.New("invalid withdrawal amount")
var errInvalidExecutionAddress = errors.New("invalid execution address")
var errInvalidWithdrawalNumber = errors.New("invalid number of withdrawals")


@@ -15,11 +15,10 @@ import (
// into the beacon state.
//
// Official spec definition:
//
// def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
// state.eth1_data_votes.append(body.eth1_data)
// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
// state.eth1_data = body.eth1_data
func ProcessEth1DataInBlock(_ context.Context, beaconState state.BeaconState, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
if beaconState == nil || beaconState.IsNil() {
return nil, errors.New("nil state")

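A standalone sketch of the majority rule above: a vote only becomes the canonical eth1_data once it accounts for more than half of the voting period's slots. The constants use the mainnet values (64 epochs × 32 slots); everything else is illustrative.

```go
package main

import "fmt"

const (
    epochsPerEth1VotingPeriod = 64
    slotsPerEpoch             = 32
)

// recordEth1Vote appends the block's eth1 data vote and adopts it as the
// canonical eth1_data once it holds a strict majority of the voting period,
// mirroring the `count(vote) * 2 > period` check above.
func recordEth1Vote(votes []string, current, vote string) ([]string, string) {
    votes = append(votes, vote)
    count := 0
    for _, v := range votes {
        if v == vote {
            count++
        }
    }
    if count*2 > epochsPerEth1VotingPeriod*slotsPerEpoch {
        current = vote
    }
    return votes, current
}

func main() {
    var votes []string
    current := "old-deposit-root"
    // A single vote is far from a majority of the 2048-slot voting period.
    votes, current = recordEth1Vote(votes, current, "new-deposit-root")
    fmt.Println(len(votes), current) // 1 old-deposit-root
}
```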

@@ -27,24 +27,23 @@ var ValidatorCannotExitYetMsg = "validator has not been active long enough to ex
// should exit the state's validator registry.
//
// Spec pseudocode definition:
//
// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
// voluntary_exit = signed_voluntary_exit.message
// validator = state.validators[voluntary_exit.validator_index]
// # Verify the validator is active
// assert is_active_validator(validator, get_current_epoch(state))
// # Verify exit has not been initiated
// assert validator.exit_epoch == FAR_FUTURE_EPOCH
// # Exits must specify an epoch when they become valid; they are not valid before then
// assert get_current_epoch(state) >= voluntary_exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
// signing_root = compute_signing_root(voluntary_exit, domain)
// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
// # Initiate exit
// initiate_validator_exit(state, voluntary_exit.validator_index)
func ProcessVoluntaryExits(
ctx context.Context,
beaconState state.BeaconState,
@@ -72,24 +71,23 @@ func ProcessVoluntaryExits(
// VerifyExitAndSignature implements the spec-defined validation for voluntary exits.
//
// Spec pseudocode definition:
//
// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
// voluntary_exit = signed_voluntary_exit.message
// validator = state.validators[voluntary_exit.validator_index]
// # Verify the validator is active
// assert is_active_validator(validator, get_current_epoch(state))
// # Verify exit has not been initiated
// assert validator.exit_epoch == FAR_FUTURE_EPOCH
// # Exits must specify an epoch when they become valid; they are not valid before then
// assert get_current_epoch(state) >= voluntary_exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
// signing_root = compute_signing_root(voluntary_exit, domain)
// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
// # Initiate exit
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
currentSlot types.Slot,
@@ -119,24 +117,23 @@ func VerifyExitAndSignature(
// verifyExitConditions implements the spec-defined validation for voluntary exits (excluding signatures).
//
// Spec pseudocode definition:
//
// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
// voluntary_exit = signed_voluntary_exit.message
// validator = state.validators[voluntary_exit.validator_index]
// # Verify the validator is active
// assert is_active_validator(validator, get_current_epoch(state))
// # Verify exit has not been initiated
// assert validator.exit_epoch == FAR_FUTURE_EPOCH
// # Exits must specify an epoch when they become valid; they are not valid before then
// assert get_current_epoch(state) >= voluntary_exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
// signing_root = compute_signing_root(voluntary_exit, domain)
// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
// # Initiate exit
// initiate_validator_exit(state, voluntary_exit.validator_index)
func verifyExitConditions(validator state.ReadOnlyValidator, currentSlot types.Slot, exit *ethpb.VoluntaryExit) error {
currentEpoch := slots.ToEpoch(currentSlot)
// Verify the validator is active.

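The three exit functions above share the same non-signature assertions. Below is a self-contained sketch of those checks, with FAR_FUTURE_EPOCH and the mainnet SHARD_COMMITTEE_PERIOD of 256 epochs as the only spec constants; the remaining names and types are hypothetical.

```go
package main

import (
    "errors"
    "fmt"
    "math"
)

const (
    farFutureEpoch       = math.MaxUint64
    shardCommitteePeriod = 256 // mainnet SHARD_COMMITTEE_PERIOD, in epochs
)

type validator struct {
    activationEpoch uint64
    exitEpoch       uint64
}

// checkExitConditions mirrors the non-signature assertions of
// process_voluntary_exit above.
func checkExitConditions(v validator, currentEpoch, exitEpoch uint64) error {
    if !(v.activationEpoch <= currentEpoch && currentEpoch < v.exitEpoch) {
        return errors.New("validator is not active")
    }
    if v.exitEpoch != farFutureEpoch {
        return errors.New("exit already initiated")
    }
    if currentEpoch < exitEpoch {
        return errors.New("exit epoch is in the future")
    }
    if currentEpoch < v.activationEpoch+shardCommitteePeriod {
        return errors.New("validator has not been active long enough to exit")
    }
    return nil
}

func main() {
    v := validator{activationEpoch: 0, exitEpoch: farFutureEpoch}
    fmt.Println(checkExitConditions(v, 300, 5)) // <nil>
    fmt.Println(checkExitConditions(v, 100, 5)) // not active long enough
}
```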

@@ -1,3 +0,0 @@
package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange


@@ -18,27 +18,27 @@ import (
//
// Spec pseudocode definition:
//
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that the block is newer than latest block header
// assert block.slot > state.latest_block_header.slot
// # Verify that proposer index is the correct index
// assert block.proposer_index == get_beacon_proposer_index(state)
// # Verify that the parent matches
// assert block.parent_root == hash_tree_root(state.latest_block_header)
// # Cache current block as the new latest block
// state.latest_block_header = BeaconBlockHeader(
// slot=block.slot,
// proposer_index=block.proposer_index,
// parent_root=block.parent_root,
// state_root=Bytes32(), # Overwritten in the next process_slot call
// body_root=hash_tree_root(block.body),
// )
//
// # Verify proposer is not slashed
// proposer = state.validators[block.proposer_index]
// assert not proposer.slashed
func ProcessBlockHeader(
ctx context.Context,
beaconState state.BeaconState,
@@ -73,28 +73,27 @@ func ProcessBlockHeader(
// using an unsigned block.
//
// Spec pseudocode definition:
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that the block is newer than latest block header
// assert block.slot > state.latest_block_header.slot
// # Verify that proposer index is the correct index
// assert block.proposer_index == get_beacon_proposer_index(state)
// # Verify that the parent matches
// assert block.parent_root == hash_tree_root(state.latest_block_header)
// # Cache current block as the new latest block
// state.latest_block_header = BeaconBlockHeader(
// slot=block.slot,
// proposer_index=block.proposer_index,
// parent_root=block.parent_root,
// state_root=Bytes32(), # Overwritten in the next process_slot call
// body_root=hash_tree_root(block.body),
// )
//
// # Verify proposer is not slashed
// proposer = state.validators[block.proposer_index]
// assert not proposer.slashed
func ProcessBlockHeaderNoVerify(
ctx context.Context,
beaconState state.BeaconState,

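A minimal sketch of the header assertions above, with hash_tree_root and get_beacon_proposer_index replaced by caller-supplied values so the example stays self-contained; all names here are illustrative.

```go
package main

import (
    "errors"
    "fmt"
)

type blockHeader struct {
    slot          uint64
    proposerIndex uint64
    parentRoot    [32]byte
}

// verifyHeader mirrors the assertions of process_block_header above.
// latestHeaderRoot and expectedProposer stand in for
// hash_tree_root(state.latest_block_header) and get_beacon_proposer_index.
func verifyHeader(stateSlot, latestHeaderSlot uint64, latestHeaderRoot [32]byte,
    expectedProposer uint64, blk blockHeader, proposerSlashed bool) error {
    if blk.slot != stateSlot {
        return errors.New("block slot does not match state slot")
    }
    if blk.slot <= latestHeaderSlot {
        return errors.New("block is not newer than latest block header")
    }
    if blk.proposerIndex != expectedProposer {
        return errors.New("unexpected proposer index")
    }
    if blk.parentRoot != latestHeaderRoot {
        return errors.New("parent root does not match latest block header root")
    }
    if proposerSlashed {
        return errors.New("proposer is slashed")
    }
    return nil
}

func main() {
    parent := [32]byte{1}
    blk := blockHeader{slot: 10, proposerIndex: 3, parentRoot: parent}
    fmt.Println(verifyHeader(10, 9, parent, 3, blk, false)) // <nil>
    fmt.Println(verifyHeader(11, 9, parent, 3, blk, false)) // slot mismatch
}
```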

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
@@ -25,8 +26,7 @@ var (
//
// Spec code:
// def is_merge_transition_complete(state: BeaconState) -> bool:
//
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if st == nil {
return false, errors.New("nil state")
@@ -34,14 +34,15 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
if st.Version() > version.Bellatrix {
return true, nil
}
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
isEmpty, err := blocks.IsEmptyExecutionData(h)
wrappedHeader, err := blocks.WrappedExecutionPayloadHeader(h)
if err != nil {
return false, err
}
isEmpty, err := blocks.IsEmptyExecutionData(wrappedHeader)
if err != nil {
return false, err
}
@@ -52,8 +53,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
//
// Spec code:
// def is_execution_block(block: BeaconBlock) -> bool:
//
// return block.body.execution_payload != ExecutionPayload()
func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
@@ -78,8 +78,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
//
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
//
// return is_merge_block(state, body) or is_merge_complete(state)
func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (bool, error) {
if st == nil || body == nil {
return false, errors.New("nil state or block body")
@@ -96,8 +95,12 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
// IsExecutionEnabledUsingHeader returns true if execution is enabled, given a post-processed payload header and block body.
// This is an optimized version of IsExecutionEnabled that does not require the beacon state as an argument.
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionData, body interfaces.BeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionData(header)
func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
wrappedHeader, err := blocks.WrappedExecutionPayloadHeader(header)
if err != nil {
return false, err
}
isEmpty, err := blocks.IsEmptyExecutionData(wrappedHeader)
if err != nil {
return false, err
}
@@ -116,10 +119,9 @@ func IsPreBellatrixVersion(v int) bool {
// These validation steps ONLY apply post-merge.
//
// Spec code:
//
// # Verify consistency of the parent hash with respect to the previous execution payload header
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
complete, err := IsMergeTransitionComplete(st)
if err != nil {
@@ -128,11 +130,12 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
if !complete {
return nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if !bytes.Equal(payload.ParentHash(), header.BlockHash()) {
if !bytes.Equal(payload.ParentHash(), header.BlockHash) {
return ErrInvalidPayloadBlockHash
}
return nil
@@ -142,11 +145,10 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
// These validation steps apply both pre-merge and post-merge.
//
// Spec code:
//
// # Verify random
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
// # Verify timestamp
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -172,51 +174,48 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
//
// Spec code:
// def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
//
// # Verify consistency of the parent hash with respect to the previous execution payload header
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
// # Verify random
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
// # Verify timestamp
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
// # Verify the execution payload is valid
// assert execution_engine.execute_payload(payload)
// # Cache execution payload header
// state.latest_execution_payload_header = ExecutionPayloadHeader(
// parent_hash=payload.parent_hash,
// FeeRecipient=payload.FeeRecipient,
// state_root=payload.state_root,
// receipt_root=payload.receipt_root,
// logs_bloom=payload.logs_bloom,
// random=payload.random,
// block_number=payload.block_number,
// gas_limit=payload.gas_limit,
// gas_used=payload.gas_used,
// timestamp=payload.timestamp,
// extra_data=payload.extra_data,
// base_fee_per_gas=payload.base_fee_per_gas,
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
if st.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get payload withdrawals")
}
st, err = ProcessWithdrawals(st, withdrawals)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
}
if err := ValidatePayload(st, payload); err != nil {
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
header, err := blocks.PayloadToHeader(payload)
if err != nil {
return nil, err
}
wrappedHeader, err := blocks.WrappedExecutionPayloadHeader(header)
if err != nil {
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(wrappedHeader); err != nil {
return nil, err
}
return st, nil
@@ -237,7 +236,7 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interf
if err != nil {
return err
}
if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
if !bytes.Equal(header.ParentHash(), h.BlockHash) {
return ErrInvalidPayloadBlockHash
}
return nil

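The is_merge_transition_complete check above reduces to "the latest execution payload header is not the zero-valued header". A pared-down sketch of that comparison follows; the struct carries only a few of the real header's fields.

```go
package main

import "fmt"

// payloadHeader is a pared-down execution payload header; the real
// ExecutionPayloadHeader has many more fields.
type payloadHeader struct {
    parentHash  [32]byte
    blockNumber uint64
    gasLimit    uint64
    blockHash   [32]byte
}

// mergeTransitionComplete mirrors is_merge_transition_complete: the merge
// is complete once the latest header differs from the zero-valued header.
func mergeTransitionComplete(latest payloadHeader) bool {
    empty := payloadHeader{}
    return latest != empty
}

func main() {
    fmt.Println(mergeTransitionComplete(payloadHeader{}))               // false
    fmt.Println(mergeTransitionComplete(payloadHeader{blockNumber: 1})) // true
}
```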

@@ -7,14 +7,11 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -23,170 +20,127 @@ import (
func Test_IsMergeComplete(t *testing.T) {
tests := []struct {
name string
payload interfaces.ExecutionData
payload *enginev1.ExecutionPayloadHeader
want bool
}{
{
name: "empty payload header",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
want: false,
name: "empty payload header",
payload: emptyPayloadHeader(),
want: false,
},
{
name: "has parent hash",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has fee recipient",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has state root",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has receipt root",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has logs bloom",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
return h
}(),
want: true,
},
{
name: "has random",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has base fee",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has block hash",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has extra data",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "has block number",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.BlockNumber = 1
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockNumber = 1
return h
}(),
want: true,
},
{
name: "has gas limit",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.GasLimit = 1
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.GasLimit = 1
return h
}(),
want: true,
},
{
name: "has gas used",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.GasUsed = 1
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.GasUsed = 1
return h
}(),
want: true,
},
{
name: "has time stamp",
payload: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.Timestamp = 1
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.Timestamp = 1
return h
}(),
want: true,
@@ -195,7 +149,9 @@ func Test_IsMergeComplete(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.payload)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
got, err := blocks.IsMergeTransitionComplete(st)
require.NoError(t, err)
if got != tt.want {
@@ -205,13 +161,6 @@ func Test_IsMergeComplete(t *testing.T) {
}
}
func Test_IsMergeCompleteCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, 1)
got, err := blocks.IsMergeTransitionComplete(st)
require.NoError(t, err)
require.Equal(t, got, true)
}
func Test_IsExecutionBlock(t *testing.T) {
tests := []struct {
name string
@@ -246,65 +195,40 @@ func Test_IsExecutionBlock(t *testing.T) {
}
}
func Test_IsExecutionBlockCapella(t *testing.T) {
blk := util.NewBeaconBlockCapella()
blk.Block.Body.ExecutionPayload = emptyPayloadCapella()
wrappedBlock, err := consensusblocks.NewBeaconBlock(blk.Block)
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, false, got)
}
func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
useAltairSt bool
want bool
}{
{
name: "use older than bellatrix state",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
name: "use older than bellatrix state",
payload: emptyPayload(),
header: emptyPayloadHeader(),
useAltairSt: true,
want: false,
},
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
want: false,
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "empty header, non-empty payload",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
name: "empty header, non-empty payload",
header: emptyPayloadHeader(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
@@ -314,12 +238,9 @@ func Test_IsExecutionEnabled(t *testing.T) {
},
{
name: "non-empty header, non-empty payload",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
payload: func() *enginev1.ExecutionPayload {
@@ -333,7 +254,9 @@ func Test_IsExecutionEnabled(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := consensusblocks.NewBeaconBlockBody(blk.Block.Body)
@@ -350,53 +273,32 @@ func Test_IsExecutionEnabled(t *testing.T) {
}
}
func Test_IsExecutionEnabledCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, 1)
blk := util.NewBeaconBlockCapella()
body, err := consensusblocks.NewBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
require.Equal(t, false, got)
}
func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
want: false,
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
},
{
name: "empty header, non-empty payload",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
name: "empty header, non-empty payload",
header: emptyPayloadHeader(),
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
@@ -406,12 +308,9 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
},
{
name: "non-empty header, non-empty payload",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
payload: func() *enginev1.ExecutionPayload {
@@ -441,18 +340,14 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "merge incomplete",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
err: nil,
header: emptyPayloadHeader(),
err: nil,
},
{
name: "validate passes",
@@ -461,12 +356,9 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
err: nil,
@@ -478,12 +370,9 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.BlockHash = bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength)
return h
}(),
err: blocks.ErrInvalidPayloadBlockHash,
@@ -492,7 +381,9 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
require.NoError(t, err)
err = blocks.ValidatePayloadWhenMergeCompletes(st, wrappedPayload)
@@ -602,30 +493,14 @@ func Test_ProcessPayload(t *testing.T) {
require.Equal(t, tt.err, err)
want, err := consensusblocks.PayloadToHeader(wrappedPayload)
require.Equal(t, tt.err, err)
h, err := st.LatestExecutionPayloadHeader()
got, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
got, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
require.DeepSSZEqual(t, want, got)
}
})
}
}
func Test_ProcessPayloadCapella(t *testing.T) {
spb := &ethpb.BeaconStateCapella{}
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
header, err := emptyPayloadHeaderCapella()
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(header))
payload := emptyPayloadCapella()
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
}
func Test_ProcessPayloadHeader(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
@@ -634,39 +509,29 @@ func Test_ProcessPayloadHeader(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.PrevRandao = random
p.Timestamp = uint64(ts.Unix())
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
err: blocks.ErrInvalidPayloadPrevRandao,
name: "incorrect prev randao",
header: emptyPayloadHeader(),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.PrevRandao = random
p.Timestamp = 1
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
err: blocks.ErrInvalidPayloadTimeStamp,
@@ -674,18 +539,16 @@ func Test_ProcessPayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := blocks.ProcessPayloadHeader(st, tt.header)
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
st, err := blocks.ProcessPayloadHeader(st, wrappedHeader)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
want, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
h, err := st.LatestExecutionPayloadHeader()
got, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
got, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
require.DeepSSZEqual(t, want, got)
require.DeepSSZEqual(t, tt.header, got)
}
})
}
@@ -699,39 +562,29 @@ func Test_ValidatePayloadHeader(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.PrevRandao = random
p.Timestamp = uint64(ts.Unix())
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
err: blocks.ErrInvalidPayloadPrevRandao,
name: "incorrect prev randao",
header: emptyPayloadHeader(),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.PrevRandao = random
p.Timestamp = 1
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
err: blocks.ErrInvalidPayloadTimeStamp,
@@ -739,7 +592,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayloadHeader(st, tt.header)
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
err = blocks.ValidatePayloadHeader(st, wrappedHeader)
require.Equal(t, tt.err, err)
})
}
@@ -754,14 +609,13 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
header interfaces.ExecutionData
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "no merge",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
return h
}(),
state: emptySt,
@@ -769,12 +623,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
},
{
name: "process passes",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = []byte{'a'}
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'a'}
return h
}(),
state: st,
@@ -782,12 +633,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
},
{
name: "invalid block hash",
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
p, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
p.ParentHash = []byte{'b'}
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'b'}
return h
}(),
state: st,
@@ -796,7 +644,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, wrappedHeader)
require.Equal(t, tt.err, err)
})
}
@@ -845,9 +695,9 @@ func Test_PayloadToHeader(t *testing.T) {
func BenchmarkBellatrixComplete(b *testing.B) {
st, _ := util.DeterministicGenesisStateBellatrix(b, 1)
h, err := emptyPayloadHeader()
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(emptyPayloadHeader())
require.NoError(b, err)
require.NoError(b, st.SetLatestExecutionPayloadHeader(h))
require.NoError(b, st.SetLatestExecutionPayloadHeader(wrappedHeader))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -856,8 +706,8 @@ func BenchmarkBellatrixComplete(b *testing.B) {
}
}
func emptyPayloadHeader() (interfaces.ExecutionData, error) {
return consensusblocks.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
func emptyPayloadHeader() *enginev1.ExecutionPayloadHeader {
return &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
@@ -868,23 +718,7 @@ func emptyPayloadHeader() (interfaces.ExecutionData, error) {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
})
}
func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
return consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
})
}
}
func emptyPayload() *enginev1.ExecutionPayload {
@@ -901,19 +735,3 @@ func emptyPayload() *enginev1.ExecutionPayload {
ExtraData: make([]byte, 0),
}
}
func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
return &enginev1.ExecutionPayloadCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
ExtraData: make([]byte, 0),
}
}


@@ -24,27 +24,26 @@ type slashValidatorFunc func(ctx context.Context, st state.BeaconState, vid type
// slashing conditions if any slashable events occurred.
//
// Spec pseudocode definition:
// def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
// header_1 = proposer_slashing.signed_header_1.message
// header_2 = proposer_slashing.signed_header_2.message
//
// # Verify header slots match
// assert header_1.slot == header_2.slot
// # Verify header proposer indices match
// assert header_1.proposer_index == header_2.proposer_index
// # Verify the headers are different
// assert header_1 != header_2
// # Verify the proposer is slashable
// proposer = state.validators[header_1.proposer_index]
// assert is_slashable_validator(proposer, get_current_epoch(state))
// # Verify signatures
// for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2):
// domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot))
// signing_root = compute_signing_root(signed_header.message, domain)
// assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)
//
// slash_validator(state, header_1.proposer_index)
func ProcessProposerSlashings(
ctx context.Context,
beaconState state.BeaconState,
@@ -82,7 +81,7 @@ func ProcessProposerSlashing(
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() == version.Bellatrix, beaconState.Version() == version.Capella:
case beaconState.Version() == version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
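As an aside (not from this diff), the quotient selected in this switch is what the spec's slash_validator divides by when applying the initial slashing penalty; a rough sketch with assumed mainnet values:

// Illustrative only: initial slashing penalty as described by the spec's
// slash_validator, using the quotient selected above.
func initialSlashingPenalty(effectiveBalanceGwei, slashingQuotient uint64) uint64 {
	return effectiveBalanceGwei / slashingQuotient
}

// For a 32 ETH (32_000_000_000 Gwei) effective balance:
//   phase0    quotient 128 -> 250_000_000 Gwei (0.25 ETH)
//   Altair    quotient  64 -> 500_000_000 Gwei (0.5 ETH)
//   Bellatrix quotient  32 -> 1_000_000_000 Gwei (1 ETH)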

View File

@@ -282,54 +282,6 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
// We test the case when data is correct and verify the validator
// registry has been updated.
beaconState, privKeys := util.DeterministicGenesisStateCapella(t, 100)
proposerIdx := types.ValidatorIndex(1)
header1 := &ethpb.SignedBeaconBlockHeader{
Header: util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
StateRoot: bytesutil.PadTo([]byte("A"), 32),
}),
}
var err error
header1.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, header1.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)
header2 := util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
StateRoot: bytesutil.PadTo([]byte("B"), 32),
},
})
header2.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, header2.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: header1,
Header_2: header2,
},
}
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
if newStateVals[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf("Proposer with index 1 did not correctly exit,"+"wanted slot:%d, got:%d",
newStateVals[1].ExitEpoch, beaconState.Validators()[1].ExitEpoch)
}
require.Equal(t, uint64(31000000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestVerifyProposerSlashing(t *testing.T) {
type args struct {
beaconState state.BeaconState

View File

@@ -17,16 +17,15 @@ import (
// in the beacon state's latest randao mixes slice.
//
// Spec pseudocode definition:
//
// def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
// epoch = get_current_epoch(state)
// # Verify RANDAO reveal
// proposer = state.validators[get_beacon_proposer_index(state)]
// signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO))
// assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal)
// # Mix in RANDAO reveal
// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
func ProcessRandao(
ctx context.Context,
beaconState state.BeaconState,
@@ -57,12 +56,11 @@ func ProcessRandao(
// in the beacon state's latest randao mixes slice.
//
// Spec pseudocode definition:
//
// # Mix it in
// state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = (
// xor(get_randao_mix(state, get_current_epoch(state)),
// hash(body.randao_reveal))
// )
func ProcessRandaoNoVerify(
beaconState state.BeaconState,
randaoReveal []byte,
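For reference, a standalone sketch of the mixing step from the pseudocode above (illustrative, not the diff's code; the spec's hash function is SHA-256):

import "crypto/sha256"

// mixInReveal XORs the current epoch's RANDAO mix with the hash of the reveal,
// as in: mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)).
func mixInReveal(currentMix [32]byte, randaoReveal []byte) [32]byte {
	h := sha256.Sum256(randaoReveal)
	var mixed [32]byte
	for i := range mixed {
		mixed[i] = currentMix[i] ^ h[i]
	}
	return mixed
}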

View File

@@ -2,94 +2,46 @@ package blocks
import (
"bytes"
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
const executionToBLSPadding = 12
func ProcessBLSToExecutionChanges(
st state.BeaconState,
signed interfaces.SignedBeaconBlock) (state.BeaconState, error) {
if signed.Version() < version.Capella {
return st, nil
}
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
// Return early if no changes
if len(changes) == 0 {
return st, nil
}
for _, change := range changes {
st, err = processBLSToExecutionChange(st, change)
if err != nil {
return nil, errors.Wrap(err, "could not process BLSToExecutionChange")
}
}
return st, nil
}
// processBLSToExecutionChange validates a SignedBLSToExecution message and
// ProcessBLSToExecutionChange validates a SignedBLSToExecution message and
// changes the validator's withdrawal address accordingly.
//
// Spec pseudocode definition:
//
// def process_bls_to_execution_change(state: BeaconState, signed_address_change: SignedBLSToExecutionChange) -> None:
//
// validator = state.validators[address_change.validator_index]
//
// assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX
// assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]
//
// domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE)
// signing_root = compute_signing_root(address_change, domain)
// assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)
//
// validator.withdrawal_credentials = (
// ETH1_ADDRESS_WITHDRAWAL_PREFIX
// + b'\x00' * 11
// + address_change.to_execution_address
// )
func processBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
// Checks that the message passes the validation conditions.
val, err := ValidateBLSToExecutionChange(st, signed)
if err != nil {
return nil, err
}
message := signed.Message
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}
// ValidateBLSToExecutionChange validates the execution change message against the state and returns the
// validator referenced by the message.
func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.SignedBLSToExecutionChange) (*ethpb.Validator, error) {
func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
if signed == nil {
return nil, errNilSignedWithdrawalMessage
return st, errNilSignedWithdrawalMessage
}
message := signed.Message
if message == nil {
return nil, errNilWithdrawalMessage
return st, errNilWithdrawalMessage
}
val, err := st.ValidatorAtIndex(message.ValidatorIndex)
@@ -103,89 +55,24 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// hash the public key and verify it matches the withdrawal credentials
fromPubkey := message.FromBlsPubkey
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(fromPubkey)
if !bytes.Equal(digest[1:], cred[1:]) {
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(fromPubkey[:32]), bytesutil.ToBytes32(fromPubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
if !bytes.Equal(digest[0][1:], cred[1:]) {
return nil, errInvalidWithdrawalCredentials
}
return val, nil
}
func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal) (state.BeaconState, error) {
expected, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
if len(expected) != len(withdrawals) {
return nil, errInvalidWithdrawalNumber
}
for i, withdrawal := range withdrawals {
if withdrawal.WithdrawalIndex != expected[i].WithdrawalIndex {
return nil, errInvalidWithdrawalIndex
}
if withdrawal.ValidatorIndex != expected[i].ValidatorIndex {
return nil, errInvalidValidatorIndex
}
if !bytes.Equal(withdrawal.ExecutionAddress, expected[i].ExecutionAddress) {
return nil, errInvalidExecutionAddress
}
if withdrawal.Amount != expected[i].Amount {
return nil, errInvalidWithdrawalAmount
}
err := helpers.DecreaseBalance(st, withdrawal.ValidatorIndex, withdrawal.Amount)
if err != nil {
return nil, errors.Wrap(err, "could not decrease balance")
}
}
if len(withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(withdrawals[len(withdrawals)-1].WithdrawalIndex + 1); err != nil {
return nil, errors.Wrap(err, "could not set next withdrawal index")
}
nextValidatorIndex := withdrawals[len(withdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == types.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return nil, errors.Wrap(err, "could not set latest withdrawal validator index")
}
}
return st, nil
}
func BLSChangesSignatureBatch(
ctx context.Context,
st state.ReadOnlyBeaconState,
changes []*ethpb.SignedBLSToExecutionChange,
) (*bls.SignatureBatch, error) {
// Return early if no changes
if len(changes) == 0 {
return bls.NewSet(), nil
}
batch := &bls.SignatureBatch{
Signatures: make([][]byte, len(changes)),
PublicKeys: make([]bls.PublicKey, len(changes)),
Messages: make([][32]byte, len(changes)),
}
epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
for i, change := range changes {
if ctx.Err() != nil {
return nil, ctx.Err()
}
batch.Signatures[i] = change.Signature
publicKey, err := bls.PublicKeyFromBytes(change.Message.FromBlsPubkey)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
batch.PublicKeys[i] = publicKey
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
}
batch.Messages[i] = htr
if err := signing.VerifySigningRoot(message, fromPubkey, signed.Signature, domain); err != nil {
return nil, signing.ErrSigFailedToVerify
}
return batch, nil
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}
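To make the two credential layouts referenced above concrete, a small sketch (assumed mainnet prefixes 0x00 and 0x01; not part of the diff):

// blsCredentials builds 0x00-prefixed credentials: BLS_WITHDRAWAL_PREFIX
// followed by the last 31 bytes of hash(from_bls_pubkey).
func blsCredentials(pubkeyDigest [32]byte) []byte {
	cred := make([]byte, 32)
	cred[0] = 0x00
	copy(cred[1:], pubkeyDigest[1:])
	return cred
}

// eth1Credentials builds 0x01-prefixed credentials: the prefix, 11 zero bytes of
// padding (the 12-byte executionToBLSPadding above), then the 20-byte address.
func eth1Credentials(executionAddress [20]byte) []byte {
	cred := make([]byte, 12, 32)
	cred[0] = 0x01
	return append(cred, executionAddress[:]...)
}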

View File

@@ -1,26 +1,18 @@
package blocks_test
import (
"context"
"math/rand"
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
func TestProcessBLSToExecutionChange(t *testing.T) {
@@ -35,13 +27,14 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
WithdrawalCredentials: digest[0][:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -70,47 +63,6 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
require.DeepEqual(t, message.ToExecutionAddress, val.WithdrawalCredentials[12:])
})
t.Run("happy case only validation", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
val, err := blocks.ValidateBLSToExecutionChange(st, signed)
require.NoError(t, err)
require.DeepEqual(t, digest[:], val.WithdrawalCredentials)
})
t.Run("non-existent validator", func(t *testing.T) {
priv, err := bls.RandKey()
@@ -123,13 +75,14 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
WithdrawalCredentials: digest[0][:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -202,13 +155,15 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
WithdrawalCredentials: digest[0][:],
},
}
registry[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
@@ -236,519 +191,3 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
})
}
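The digest computation repeated in each test case above can be read as the following helper (illustrative; the tests inline it, and htr.VectorizedSha256 and bytesutil.ToBytes32 are the same packages imported at the top of this file):

// pubkeyDigest splits the 48-byte BLS pubkey into two 32-byte chunks (the second
// zero-padded) and hashes them with the vectorized SHA-256 hasher.
func pubkeyDigest(pubkey []byte) [32]byte {
	chunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
	out := make([][32]byte, 1)
	htr.VectorizedSha256(chunks, out)
	return out[0]
}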
func TestProcessWithdrawals(t *testing.T) {
const (
currentEpoch = types.Epoch(10)
epochInFuture = types.Epoch(12)
epochInPast = types.Epoch(8)
numValidators = 128
notWithdrawableIndex = 127
notPartiallyWithdrawable = 126
)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
type args struct {
Name string
NextWithdrawalValidatorIndex types.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []types.ValidatorIndex
PartialWithdrawalIndices []types.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
}
type control struct {
NextWithdrawalValidatorIndex types.ValidatorIndex
NextWithdrawalIndex uint64
ExpectedError bool
Balances map[uint64]uint64
}
type Test struct {
Args args
Control control
}
executionAddress := func(i types.ValidatorIndex) []byte {
wc := make([]byte, 20)
wc[19] = byte(i)
return wc
}
withdrawalAmount := func(i types.ValidatorIndex) uint64 {
return maxEffectiveBalance + uint64(i)*100000
}
fullWithdrawal := func(i types.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
WithdrawalIndex: idx,
ValidatorIndex: i,
ExecutionAddress: executionAddress(i),
Amount: withdrawalAmount(i),
}
}
partialWithdrawal := func(i types.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
WithdrawalIndex: idx,
ValidatorIndex: i,
ExecutionAddress: executionAddress(i),
Amount: withdrawalAmount(i) - maxEffectiveBalance,
}
}
tests := []Test{
{
Args: args{
Name: "success no withdrawals",
NextWithdrawalValidatorIndex: 10,
NextWithdrawalIndex: 3,
},
Control: control{
NextWithdrawalValidatorIndex: 10,
NextWithdrawalIndex: 3,
},
},
{
Args: args{
Name: "success one full withdrawal",
NextWithdrawalIndex: 3,
NextWithdrawalValidatorIndex: 5,
FullWithdrawalIndices: []types.ValidatorIndex{1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(1, 3),
},
},
Control: control{
NextWithdrawalValidatorIndex: 2,
NextWithdrawalIndex: 4,
Balances: map[uint64]uint64{1: 0},
},
},
{
Args: args{
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PartialWithdrawalIndices: []types.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 21),
},
},
Control: control{
NextWithdrawalValidatorIndex: 8,
NextWithdrawalIndex: 22,
Balances: map[uint64]uint64{7: maxEffectiveBalance},
},
},
{
Args: args{
Name: "success many full withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
fullWithdrawal(1, 25),
},
},
Control: control{
NextWithdrawalValidatorIndex: 2,
NextWithdrawalIndex: 26,
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0, 1: 0},
},
},
{
Args: args{
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
partialWithdrawal(1, 25),
},
},
Control: control{
NextWithdrawalValidatorIndex: 2,
NextWithdrawalIndex: 26,
Balances: map[uint64]uint64{
7: maxEffectiveBalance,
19: maxEffectiveBalance,
28: maxEffectiveBalance,
1: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 12,
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28},
PartialWithdrawalIndices: []types.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(15, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
partialWithdrawal(89, 25), partialWithdrawal(1, 26), partialWithdrawal(2, 27),
fullWithdrawal(7, 28),
},
},
Control: control{
NextWithdrawalValidatorIndex: 8,
NextWithdrawalIndex: 29,
Balances: map[uint64]uint64{
7: 0, 19: 0, 28: 0,
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
15: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success more than max fully withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
FullWithdrawalIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(1, 22), fullWithdrawal(2, 23), fullWithdrawal(3, 24),
fullWithdrawal(4, 25), fullWithdrawal(5, 26), fullWithdrawal(6, 27),
fullWithdrawal(7, 28), fullWithdrawal(8, 29), fullWithdrawal(9, 30),
fullWithdrawal(21, 31), fullWithdrawal(22, 32), fullWithdrawal(23, 33),
fullWithdrawal(24, 34), fullWithdrawal(25, 35), fullWithdrawal(26, 36),
fullWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,
21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0,
},
},
},
{
Args: args{
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PartialWithdrawalIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
partialWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: maxEffectiveBalance,
2: maxEffectiveBalance,
3: maxEffectiveBalance,
4: maxEffectiveBalance,
5: maxEffectiveBalance,
6: maxEffectiveBalance,
7: maxEffectiveBalance,
8: maxEffectiveBalance,
9: maxEffectiveBalance,
21: maxEffectiveBalance,
22: maxEffectiveBalance,
23: maxEffectiveBalance,
24: maxEffectiveBalance,
25: maxEffectiveBalance,
26: maxEffectiveBalance,
27: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PartialWithdrawalIndices: []types.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid withdrawal index",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 25),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid validator index",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(27, 24),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid withdrawal amount",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure validator not fully withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []types.ValidatorIndex{notWithdrawableIndex},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notWithdrawableIndex, 22),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []types.ValidatorIndex{notPartiallyWithdrawable},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notPartiallyWithdrawable, 22),
},
},
Control: control{
ExpectedError: true,
},
},
}
checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
l, err := st.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)
n, err := st.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalIndex, n)
balances := st.Balances()
for idx, bal := range expected.Balances {
require.Equal(t, bal, balances[idx])
}
}
prepareValidators := func(st *ethpb.BeaconStateCapella, arguments args) (state.BeaconState, error) {
validators := make([]*ethpb.Validator, numValidators)
st.Balances = make([]uint64, numValidators)
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = epochInFuture
v.WithdrawalCredentials = make([]byte, 32)
v.WithdrawalCredentials[31] = byte(i)
st.Balances[i] = v.EffectiveBalance - uint64(rand.Intn(1000))
validators[i] = v
}
for _, idx := range arguments.FullWithdrawalIndices {
if idx != notWithdrawableIndex {
validators[idx].WithdrawableEpoch = epochInPast
}
st.Balances[idx] = withdrawalAmount(idx)
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
}
for _, idx := range arguments.PartialWithdrawalIndices {
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
st.Balances[idx] = withdrawalAmount(idx)
}
st.Validators = validators
return state_native.InitializeFromProtoCapella(st)
}
for _, test := range tests {
t.Run(test.Args.Name, func(t *testing.T) {
if test.Args.Withdrawals == nil {
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
}
if test.Args.FullWithdrawalIndices == nil {
test.Args.FullWithdrawalIndices = make([]types.ValidatorIndex, 0)
}
if test.Args.PartialWithdrawalIndices == nil {
test.Args.PartialWithdrawalIndices = make([]types.ValidatorIndex, 0)
}
slot, err := slots.EpochStart(currentEpoch)
require.NoError(t, err)
spb := &ethpb.BeaconStateCapella{
Slot: slot,
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, test.Args.Withdrawals)
if test.Control.ExpectedError {
require.NotNil(t, err)
} else {
require.NoError(t, err)
checkPostState(t, test.Control, post)
}
})
}
}
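For readers tracing the fixtures above, the withdrawal amounts reduce to a simple rule, restated here as a sketch (not part of the diff; the zero branch for a balance at or below the maximum is an assumption about non-withdrawable validators): a full withdrawal drains the whole balance, a partial withdrawal only the excess over MAX_EFFECTIVE_BALANCE.

// expectedAmount mirrors how fullWithdrawal and partialWithdrawal above derive
// their Amount fields from a validator's balance.
func expectedAmount(balanceGwei, maxEffectiveBalanceGwei uint64, full bool) uint64 {
	if full {
		return balanceGwei
	}
	if balanceGwei <= maxEffectiveBalanceGwei {
		return 0 // no excess to skim; not partially withdrawable
	}
	return balanceGwei - maxEffectiveBalanceGwei
}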
func TestProcessBLSToExecutionChanges(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: signedChanges,
}
bpb := &ethpb.BeaconBlockCapella{
Body: body,
}
sbpb := &ethpb.SignedBeaconBlockCapella{
Block: bpb,
}
signed, err := consensusblocks.NewSignedBeaconBlock(sbpb)
require.NoError(t, err)
st, err = blocks.ProcessBLSToExecutionChanges(st, signed)
require.NoError(t, err)
vals := st.Validators()
for _, val := range vals {
require.DeepEqual(t, executionAddress, val.WithdrawalCredentials[12:])
require.Equal(t, params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, val.WithdrawalCredentials[0])
}
}
func TestBLSChangesSignatureBatch(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
batch, err := blocks.BLSChangesSignatureBatch(context.Background(), st, signedChanges)
require.NoError(t, err)
verify, err := batch.Verify()
require.NoError(t, err)
require.Equal(t, true, verify)
}

View File

@@ -1,31 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["upgrade.go"],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/capella",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["upgrade_test.go"],
deps = [
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -1,96 +0,0 @@
package capella
import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateCapella{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: make([]byte, 32),
},
NextWithdrawalIndex: 0,
NextWithdrawalValidatorIndex: 0,
}
return state_native.InitializeFromProtoUnsafeCapella(s)
}
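A minimal sketch of where an upgrade function like this is typically wired in (assumed: a CapellaForkEpoch config field and an epoch-boundary hook in the slot-processing code; neither is shown in this compare view):

// maybeUpgradeToCapella is a hypothetical hook illustrating a call site for
// UpgradeToCapella at the first slot of the assumed CapellaForkEpoch.
func maybeUpgradeToCapella(st state.BeaconState) (state.BeaconState, error) {
	if slots.ToEpoch(st.Slot()) == params.BeaconConfig().CapellaForkEpoch {
		return capella.UpgradeToCapella(st)
	}
	return st, nil
}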

View File

@@ -1,101 +0,0 @@
package capella_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/capella"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestUpgradeToCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
mSt, err := capella.UpgradeToCapella(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderCapella)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: make([]byte, 32),
}
require.DeepEqual(t, wanted, protoHeader)
nwi, err := mSt.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, uint64(0), nwi)
lwvi, err := mSt.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, types.ValidatorIndex(0), lwvi)
}

View File

@@ -49,13 +49,12 @@ func (s sortableIndices) Less(i, j int) bool {
// need to get attesting balance from attestations.
//
// Spec pseudocode definition:
//
// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
// """
// Return the combined effective balance of the set of unslashed validators participating in ``attestations``.
// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// """
// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) (uint64, error) {
indices, err := UnslashedAttestingIndices(ctx, state, atts)
if err != nil {
@@ -68,26 +67,25 @@ func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts
// the amount to rotate is determined by the churn limit.
//
// Spec pseudocode definition:
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator):
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
// if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
// initiate_validator_exit(state, ValidatorIndex(index))
//
// # Queue validators eligible for activation and not yet dequeued for activation
// activation_queue = sorted([
// index for index, validator in enumerate(state.validators)
// if is_eligible_for_activation(state, validator)
// # Order by the sequence of activation_eligibility_epoch setting and then index
// ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
// # Dequeued validators for activation up to churn limit
// for index in activation_queue[:get_validator_churn_limit(state)]:
// validator = state.validators[index]
// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
vals := state.Validators()
@@ -157,16 +155,16 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
// ProcessSlashings processes the slashed validators during epoch processing,
//
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(state state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
totalBalance, err := helpers.TotalActiveBalance(state)
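To see the arithmetic in the pseudocode above with concrete numbers (illustrative values, assuming a 1 ETH = 1_000_000_000 Gwei increment):

// slashingPenalty restates the penalty formula: the increment is factored out of
// the numerator so the intermediate product fits in a uint64.
func slashingPenalty(effectiveBalance, adjustedTotalSlashingBalance, totalBalance, increment uint64) uint64 {
	penaltyNumerator := effectiveBalance / increment * adjustedTotalSlashingBalance
	return penaltyNumerator / totalBalance * increment
}

// Example: a 32 ETH validator, 60_000 ETH adjusted slashing balance, and
// 1_000_000 ETH total active balance (all in Gwei) yields a 1 ETH penalty:
// slashingPenalty(32_000_000_000, 60_000_000_000_000, 1_000_000_000_000_000, 1_000_000_000) == 1_000_000_000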
@@ -209,12 +207,11 @@ func ProcessSlashings(state state.BeaconState, slashingMultiplier uint64) (state
// ProcessEth1DataReset processes updates to ETH1 data votes during epoch processing.
//
// Spec pseudocode definition:
//
// def process_eth1_data_reset(state: BeaconState) -> None:
// next_epoch = Epoch(get_current_epoch(state) + 1)
// # Reset eth1 data votes
// if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
// state.eth1_data_votes = []
func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -232,19 +229,18 @@ func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) {
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
//
// Spec pseudocode definition:
//
// def process_effective_balance_updates(state: BeaconState) -> None:
// # Update effective balances with hysteresis
// for index, validator in enumerate(state.validators):
// balance = state.balances[index]
// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
// if (
// balance + DOWNWARD_THRESHOLD < validator.effective_balance
// or validator.effective_balance + UPWARD_THRESHOLD < balance
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState, error) {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
maxEffBalance := params.BeaconConfig().MaxEffectiveBalance
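The hysteresis thresholds in the pseudocode above work out to fixed offsets under mainnet constants (EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, downward/upward multipliers 2 and 5); stated here as an aside, not taken from the diff:

const (
	gweiPerIncrement    = uint64(1_000_000_000) // EFFECTIVE_BALANCE_INCREMENT
	hysteresisIncrement = gweiPerIncrement / 4  // 0.25 ETH
	downwardThreshold   = hysteresisIncrement * 2 // 0.5 ETH
	upwardThreshold     = hysteresisIncrement * 5 // 1.25 ETH
)

// An effective balance EB therefore only moves once the actual balance leaves the
// band (EB - 0.5 ETH, EB + 1.25 ETH), and it then snaps to a whole-ETH multiple
// capped at MAX_EFFECTIVE_BALANCE.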
@@ -289,11 +285,10 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState,
// ProcessSlashingsReset processes the total slashing balances updates during epoch processing.
//
// Spec pseudocode definition:
//
// def process_slashings_reset(state: BeaconState) -> None:
// next_epoch = Epoch(get_current_epoch(state) + 1)
// # Reset slashings
// state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
func ProcessSlashingsReset(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -319,12 +314,11 @@ func ProcessSlashingsReset(state state.BeaconState) (state.BeaconState, error) {
// ProcessRandaoMixesReset processes the final updates to RANDAO mix during epoch processing.
//
// Spec pseudocode definition:
//
// def process_randao_mixes_reset(state: BeaconState) -> None:
// current_epoch = get_current_epoch(state)
// next_epoch = Epoch(current_epoch + 1)
// # Set randao mix
// state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -352,13 +346,12 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
// ProcessHistoricalRootsUpdate processes the updates to historical root accumulator during epoch processing.
//
// Spec pseudocode definition:
//
// def process_historical_roots_update(state: BeaconState) -> None:
// # Set historical root accumulator
// next_epoch = Epoch(get_current_epoch(state) + 1)
// if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
// historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
// state.historical_roots.append(hash_tree_root(historical_batch))
func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -385,11 +378,10 @@ func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, e
// ProcessParticipationRecordUpdates rotates current/previous epoch attestations during epoch processing.
//
// Spec pseudocode definition:
//
// def process_participation_record_updates(state: BeaconState) -> None:
// # Rotate current/previous epoch attestations
// state.previous_epoch_attestations = state.current_epoch_attestations
// state.current_epoch_attestations = []
func ProcessParticipationRecordUpdates(state state.BeaconState) (state.BeaconState, error) {
if err := state.RotateAttestations(); err != nil {
return nil, err
@@ -444,13 +436,12 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {
// it sorts the indices and filters out the slashed ones.
//
// Spec pseudocode definition:
//
// def get_unslashed_attesting_indices(state: BeaconState,
// attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
// output = set() # type: Set[ValidatorIndex]
// for a in attestations:
// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
// return set(filter(lambda index: not state.validators[index].slashed, output))
func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) ([]types.ValidatorIndex, error) {
var setIndices []types.ValidatorIndex
seen := make(map[uint64]bool)

View File

@@ -183,7 +183,7 @@ func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {
if stateVersion == version.Phase0 && v.IsPrevEpochAttester {
bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
}
if stateVersion >= version.Altair && v.IsPrevEpochSourceAttester {
if (stateVersion == version.Altair || stateVersion == version.Bellatrix || stateVersion == version.EIP4844) && v.IsPrevEpochSourceAttester {
bBal.PrevEpochAttested += v.CurrentEpochEffectiveBalance
}
if v.IsPrevEpochTargetAttester {

View File

@@ -70,7 +70,7 @@ func TestUpdateBalance(t *testing.T) {
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
}
func TestUpdateBalanceDifferentVersions(t *testing.T) {
func TestUpdateBalanceBellatrixVersion(t *testing.T) {
vp := []*precompute.Validator{
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
{IsCurrentEpochTargetAttester: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
@@ -92,9 +92,6 @@ func TestUpdateBalanceDifferentVersions(t *testing.T) {
}
pBal := precompute.UpdateBalance(vp, &precompute.Balance{}, version.Bellatrix)
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
pBal = precompute.UpdateBalance(vp, &precompute.Balance{}, version.Capella)
assert.DeepEqual(t, wantedPBal, pBal, "Incorrect balance calculations")
}
func TestSameHead(t *testing.T) {

View File

@@ -42,18 +42,17 @@ func UnrealizedCheckpoints(st state.BeaconState) (uint64, *ethpb.Checkpoint, *et
// Note: this is an optimized version by passing in precomputed total and attesting balances.
//
// Spec pseudocode definition:
//
// def process_justification_and_finalization(state: BeaconState) -> None:
// # Initial FFG checkpoint values have a `0x00` stub for `root`.
// # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
// if get_current_epoch(state) <= GENESIS_EPOCH + 1:
// return
// previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
// current_attestations = get_matching_target_attestations(state, get_current_epoch(state))
// total_active_balance = get_total_active_balance(state)
// previous_target_balance = get_attesting_balance(state, previous_attestations)
// current_target_balance = get_attesting_balance(state, current_attestations)
// weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal *Balance) (state.BeaconState, error) {
canProcessSlot, err := slots.EpochStart(2 /*epoch*/)
if err != nil {
@@ -84,7 +83,7 @@ func processJustificationBits(state state.BeaconState, totalActiveBalance, prevE
return newBits
}
// weighJustificationAndFinalization processes justification and finalization during
// updateJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
jc, fc, err := computeCheckpoints(state, newBits)
@@ -114,42 +113,41 @@ func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield
// checkpoints at epoch transition
// Spec pseudocode definition:
// def weigh_justification_and_finalization(state: BeaconState,
// total_active_balance: Gwei,
// previous_epoch_target_balance: Gwei,
// current_epoch_target_balance: Gwei) -> None:
// previous_epoch = get_previous_epoch(state)
// current_epoch = get_current_epoch(state)
// old_previous_justified_checkpoint = state.previous_justified_checkpoint
// old_current_justified_checkpoint = state.current_justified_checkpoint
//
// # Process justifications
// state.previous_justified_checkpoint = state.current_justified_checkpoint
// state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
// state.justification_bits[0] = 0b0
// if previous_epoch_target_balance * 3 >= total_active_balance * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
// root=get_block_root(state, previous_epoch))
// state.justification_bits[1] = 0b1
// if current_epoch_target_balance * 3 >= total_active_balance * 2:
// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
// root=get_block_root(state, current_epoch))
// state.justification_bits[0] = 0b1
//
// # Process finalizations
// bits = state.justification_bits
// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_previous_justified_checkpoint
// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
prevEpoch := time.PrevEpoch(state)
currentEpoch := time.CurrentEpoch(state)

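The pseudocode above is long, but the justification half is only a bit shift plus two 2/3-supermajority checks. A stripped-down sketch on plain values (the [4]bool bitvector stands in for bitfield.Bitvector4, and the helper name is made up):

// justifyEpochsSketch mirrors the "# Process justifications" portion of
// weigh_justification_and_finalization: shift the justification bits by one,
// then set a bit for any epoch whose target attestations reached a 2/3
// supermajority of the total active balance.
func justifyEpochsSketch(bits [4]bool, totalActive, prevTargetBal, currTargetBal uint64) [4]bool {
	// state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
	bits = [4]bool{false, bits[0], bits[1], bits[2]}
	if prevTargetBal*3 >= totalActive*2 {
		bits[1] = true // previous epoch justified
	}
	if currTargetBal*3 >= totalActive*2 {
		bits[0] = true // current epoch justified
	}
	return bits
}

The finalization half then just reads those bits against the stored checkpoints, exactly as the four commented cases above spell out.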
View File

@@ -358,11 +358,10 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
// individual validator's base reward quotient.
//
// Spec pseudocode definition:
//
// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
// total_balance = get_total_active_balance(state)
// effective_balance = state.validators[index].effective_balance
// return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
func baseReward(state state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) {
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {

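The base-reward formula quoted above is plain integer arithmetic; a small sketch with the mainnet constants hard-coded as assumptions (Prysm takes them from params.BeaconConfig()):

import "math"

const (
	baseRewardFactor    = 64 // BASE_REWARD_FACTOR (assumed mainnet value)
	baseRewardsPerEpoch = 4  // BASE_REWARDS_PER_EPOCH (assumed mainnet value)
)

// baseRewardSketch returns effective_balance * BASE_REWARD_FACTOR //
// integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH, in Gwei.
// math.Sqrt is a float shortcut for the sketch; the spec (and Prysm) use an
// exact integer square root.
func baseRewardSketch(effectiveBalance, totalBalance uint64) uint64 {
	sqrt := uint64(math.Sqrt(float64(totalBalance)))
	if sqrt == 0 {
		return 0
	}
	return effectiveBalance * baseRewardFactor / sqrt / baseRewardsPerEpoch
}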
View File

@@ -6,7 +6,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/execution",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//testing/spectest:__subpackages__",
"//validator/client:__pkg__",
],

View File

@@ -83,3 +83,15 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
return state_native.InitializeFromProtoUnsafeBellatrix(s)
}
// UpgradeToEip4844 updates an input generic state and returns the EIP-4844 version of the state.
func UpgradeToEip4844(state state.BeaconState) (state.BeaconState, error) {
if err := state.SetFork(&ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().Eip4844ForkVersion,
Epoch: time.CurrentEpoch(state),
}); err != nil {
return nil, err
}
return state, nil
}

View File

@@ -61,9 +61,6 @@ func TestUpgradeToBellatrix(t *testing.T) {
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
wanted := &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
@@ -79,5 +76,5 @@ func TestUpgradeToBellatrix(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
}
require.DeepEqual(t, wanted, protoHeader)
require.DeepEqual(t, wanted, header)
}

View File

@@ -35,15 +35,14 @@ var (
// count.
//
// Spec pseudocode definition:
//
// def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64:
// """
// Return the number of committees in each slot for the given ``epoch``.
// """
// return max(uint64(1), min(
// MAX_COMMITTEES_PER_SLOT,
// uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
// ))
func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
var committeesPerSlot = activeValidatorCount / uint64(params.BeaconConfig().SlotsPerEpoch) / params.BeaconConfig().TargetCommitteeSize
@@ -62,19 +61,18 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
// becomes expensive, consider using BeaconCommittee below.
//
// Spec pseudocode definition:
//
// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]:
// """
// Return the beacon committee at ``slot`` for ``index``.
// """
// epoch = compute_epoch_at_slot(slot)
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// return compute_committee(
// indices=get_active_validator_indices(state, epoch),
// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
// count=committees_per_slot * SLOTS_PER_EPOCH,
// )
func BeaconCommitteeFromState(ctx context.Context, state state.ReadOnlyBeaconState, slot types.Slot, committeeIndex types.CommitteeIndex) ([]types.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
@@ -103,19 +101,18 @@ func BeaconCommitteeFromState(ctx context.Context, state state.ReadOnlyBeaconSta
// from the spec definition. Having them as an argument allows for cheaper computation run time.
//
// Spec pseudocode definition:
//
// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]:
// """
// Return the beacon committee at ``slot`` for ``index``.
// """
// epoch = compute_epoch_at_slot(slot)
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// return compute_committee(
// indices=get_active_validator_indices(state, epoch),
// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
// count=committees_per_slot * SLOTS_PER_EPOCH,
// )
func BeaconCommittee(
ctx context.Context,
validatorIndices []types.ValidatorIndex,
@@ -177,14 +174,6 @@ func CommitteeAssignments(
if err != nil {
return nil, nil, err
}
minValidStartSlot := types.Slot(0)
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
}
if startSlot < minValidStartSlot {
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
}
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
@@ -392,17 +381,16 @@ func ClearCache() {
// validator indices and seed.
//
// Spec pseudocode definition:
//
// def compute_committee(indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// index: uint64,
// count: uint64) -> Sequence[ValidatorIndex]:
// """
// Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
// """
// start = (len(indices) * index) // count
// end = (len(indices) * uint64(index + 1)) // count
// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func computeCommittee(
indices []types.ValidatorIndex,
seed [32]byte,

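The list comprehension in compute_committee above is just a contiguous index range over the shuffled validator set; the only arithmetic is the start/end calculation. A sketch of that piece (the helper name is made up):

// committeeRangeSketch returns the [start, end) bounds that compute_committee
// assigns to committee `index` out of `count` committees over `n` validators.
func committeeRangeSketch(n, index, count uint64) (start, end uint64) {
	start = n * index / count
	end = n * (index + 1) / count
	return start, end
}

Each position in that range is then mapped through the shuffled-index function (see the shuffling file further down) to pick the actual validator, which is what the real computeCommittee does with ComputeShuffledIndex.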
View File

@@ -235,25 +235,6 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: params.BeaconConfig().SlotsPerHistoricalRoot + 1,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

View File

@@ -14,13 +14,12 @@ import (
// It returns an error if the requested block root is not within the slot range.
//
// Spec pseudocode definition:
//
// def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root:
// """
// Return the block root at a recent ``slot``.
// """
// assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT
// return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT]
func BlockRootAtSlot(state state.ReadOnlyBeaconState, slot types.Slot) ([]byte, error) {
if math.MaxUint64-slot < params.BeaconConfig().SlotsPerHistoricalRoot {
return []byte{}, errors.New("slot overflows uint64")
@@ -43,12 +42,11 @@ func StateRootAtSlot(state state.ReadOnlyBeaconState, slot types.Slot) ([]byte,
// BlockRoot returns the block root stored in the BeaconState for epoch start slot.
//
// Spec pseudocode definition:
//
// def get_block_root(state: BeaconState, epoch: Epoch) -> Root:
// """
// Return the block root at the start of a recent ``epoch``.
// """
// return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
func BlockRoot(state state.ReadOnlyBeaconState, epoch types.Epoch) ([]byte, error) {
s, err := slots.EpochStart(epoch)
if err != nil {

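get_block_root_at_slot above is a bounded lookup into a ring buffer holding the last SLOTS_PER_HISTORICAL_ROOT roots. A sketch with the bound check written out (the constant value and error text are assumptions):

import "errors"

const slotsPerHistoricalRoot = 8192 // SLOTS_PER_HISTORICAL_ROOT (assumed mainnet value)

// blockRootAtSlotSketch enforces slot < stateSlot <= slot + SLOTS_PER_HISTORICAL_ROOT
// and then indexes the circular block_roots buffer, as the assert above requires.
func blockRootAtSlotSketch(blockRoots [][32]byte, stateSlot, slot uint64) ([32]byte, error) {
	if slot >= stateSlot || stateSlot > slot+slotsPerHistoricalRoot {
		return [32]byte{}, errors.New("requested slot is outside the retained range")
	}
	return blockRoots[slot%slotsPerHistoricalRoot], nil
}

BlockRoot is then just this lookup applied to the epoch's start slot.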
View File

@@ -12,13 +12,12 @@ import (
// Seed returns the randao seed used for shuffling of a given epoch.
//
// Spec pseudocode definition:
//
// def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32:
// """
// Return the seed at ``epoch``.
// """
// mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow
// return hash(domain_type + uint_to_bytes(epoch) + mix)
func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.DomainByteLength]byte) ([32]byte, error) {
// See https://github.com/ethereum/consensus-specs/pull/1296 for
// rationale on why offset has to look down by 1.
@@ -41,12 +40,11 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain
// of a given slot. It is used to shuffle validators.
//
// Spec pseudocode definition:
//
// def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32:
// """
// Return the randao mix at a recent ``epoch``.
// """
// return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
func RandaoMix(state state.ReadOnlyBeaconState, epoch types.Epoch) ([]byte, error) {
return state.RandaoMixAtIndex(uint64(epoch % params.BeaconConfig().EpochsPerHistoricalVector))
}

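The seed derivation above is a single hash over the domain type, the epoch and a RANDAO mix looked up with the off-by-one described in the linked spec PR. A sketch using SHA-256 and assumed mainnet constants:

import (
	"crypto/sha256"
	"encoding/binary"
)

const (
	epochsPerHistoricalVector = 65536 // EPOCHS_PER_HISTORICAL_VECTOR (assumed mainnet value)
	minSeedLookahead          = 1     // MIN_SEED_LOOKAHEAD (assumed mainnet value)
)

// seedSketch computes hash(domain_type + uint_to_bytes(epoch) + mix), where mix is
// the RANDAO mix at epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1.
func seedSketch(randaoMixes [][32]byte, epoch uint64, domainType [4]byte) [32]byte {
	lookup := (epoch + epochsPerHistoricalVector - minSeedLookahead - 1) % epochsPerHistoricalVector
	mix := randaoMixes[lookup]
	epochBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(epochBytes, epoch) // uint_to_bytes is little-endian
	buf := make([]byte, 0, len(domainType)+len(epochBytes)+len(mix))
	buf = append(buf, domainType[:]...)
	buf = append(buf, epochBytes...)
	buf = append(buf, mix[:]...)
	return sha256.Sum256(buf)
}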
View File

@@ -17,14 +17,13 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// of input validators.
//
// Spec pseudocode definition:
//
// def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex) uint64 {
total := uint64(0)
@@ -48,13 +47,12 @@ func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex
// of active validators.
//
// Spec pseudocode definition:
//
// def get_total_active_balance(state: BeaconState) -> Gwei:
// """
// Return the combined effective balance of the active validators.
// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// """
// return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
bal, err := balanceCache.Get(s)
switch {
@@ -90,12 +88,11 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:
//
// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
// """
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
@@ -113,12 +110,11 @@ func IncreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta ui
// the post balance.
//
// Spec pseudocode definition:
//
// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
// """
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalanceWithVal(currBalance, delta uint64) (uint64, error) {
return mathutil.Add64(currBalance, delta)
}
@@ -126,12 +122,11 @@ func IncreaseBalanceWithVal(currBalance, delta uint64) (uint64, error) {
// DecreaseBalance decreases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:
//
// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
// """
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
@@ -145,12 +140,11 @@ func DecreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta ui
// the post balance.
//
// Spec pseudocode definition:
//
// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
// """
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
@@ -162,8 +156,7 @@ func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 {
//
// Spec code:
// def is_in_inactivity_leak(state: BeaconState) -> bool:
// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
func IsInInactivityLeak(prevEpoch, finalizedEpoch types.Epoch) bool {
return FinalityDelay(prevEpoch, finalizedEpoch) > params.BeaconConfig().MinEpochsToInactivityPenalty
}
@@ -172,8 +165,7 @@ func IsInInactivityLeak(prevEpoch, finalizedEpoch types.Epoch) bool {
//
// Spec code:
// def get_finality_delay(state: BeaconState) -> uint64:
// return get_previous_epoch(state) - state.finalized_checkpoint.epoch
func FinalityDelay(prevEpoch, finalizedEpoch types.Epoch) types.Epoch {
return prevEpoch - finalizedEpoch
}

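The balance helpers above differ only in how they guard the arithmetic: additions are checked for overflow, subtractions are floored at zero. A compact sketch on bare uint64 values (Prysm's IncreaseBalanceWithVal returns an error from a checked add rather than saturating):

// increaseBalanceSketch adds delta and saturates at the uint64 maximum instead
// of overflowing; a real implementation should surface the overflow as an error.
func increaseBalanceSketch(balance, delta uint64) uint64 {
	if balance > ^uint64(0)-delta {
		return ^uint64(0)
	}
	return balance + delta
}

// decreaseBalanceSketch floors at zero, exactly as decrease_balance requires.
func decreaseBalanceSketch(balance, delta uint64) uint64 {
	if delta > balance {
		return 0
	}
	return balance - delta
}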
View File

@@ -31,7 +31,7 @@ func SplitIndices(l []uint64, n uint64) [][]uint64 {
return divided
}
// ShuffledIndex returns `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with seed as entropy.
// ShuffledIndex returns `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy.
// We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays
// constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
@@ -47,29 +47,28 @@ func UnShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [32]byt
// ComputeShuffledIndex returns the shuffled validator index corresponding to seed and index count.
// Spec pseudocode definition:
// def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64:
// """
// Return the shuffled index corresponding to ``seed`` (and ``index_count``).
// """
// assert index < index_count
// # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
// # See the 'generalized domain' algorithm on page 3
// for current_round in range(SHUFFLE_ROUND_COUNT):
// pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count
// flip = (pivot + index_count - index) % index_count
// position = max(index, flip)
// source = hash(
// seed
// + uint_to_bytes(uint8(current_round))
// + uint_to_bytes(uint32(position // 256))
// )
// byte = uint8(source[(position % 256) // 8])
// bit = (byte >> (position % 8)) % 2
// index = flip if bit else index
//
// return index
func ComputeShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [32]byte, shuffle bool) (types.ValidatorIndex, error) {
if params.BeaconConfig().ShuffleRoundCount == 0 {
return index, nil
@@ -136,21 +135,20 @@ func ComputeShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [3
return index, nil
}
// ShuffleList returns list of shuffled indexes in a pseudorandom permutation `p` of `0...list_size - 1` with seed as entropy.
// ShuffleList returns list of shuffled indexes in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy.
// We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays
// constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
//
// improvements:
// - seed is always the first 32 bytes of the hash input, we just copy it into the buffer one time.
// - add round byte to seed and hash that part of the buffer.
// - split up the for-loop in two:
// 1. Handle the part from 0 (incl) to pivot (incl). This is mirrored around (pivot / 2).
// 2. Handle the part from pivot (excl) to N (excl). This is mirrored around ((pivot / 2) + (size/2)).
// - hash source every 256 iterations.
// - change byteV every 8 iterations.
// - we start at the edges, and work back to the mirror point.
// this makes us process each pair exactly once (instead of unnecessarily twice, like in the spec).
func ShuffleList(input []types.ValidatorIndex, seed [32]byte) ([]types.ValidatorIndex, error) {
return innerShuffleList(input, seed, true /* shuffle */)
}

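ShuffleList above is the optimised, whole-list form. For reference, here is a direct, unoptimised transcription of the per-index compute_shuffled_index pseudocode, assuming SHA-256 as the hash and with the round count passed in (mainnet uses SHUFFLE_ROUND_COUNT = 90; Prysm reads it from config). Callers must ensure indexCount > 0 and index < indexCount:

import (
	"crypto/sha256"
	"encoding/binary"
)

// computeShuffledIndexSketch follows the swap-or-not loop line by line.
func computeShuffledIndexSketch(index, indexCount uint64, seed [32]byte, rounds uint8) uint64 {
	for round := uint8(0); round < rounds; round++ {
		// pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(round)))[0:8]) % index_count
		h := sha256.Sum256(append(seed[:], round))
		pivot := binary.LittleEndian.Uint64(h[:8]) % indexCount
		flip := (pivot + indexCount - index) % indexCount
		position := index
		if flip > position {
			position = flip
		}
		// source = hash(seed + uint_to_bytes(uint8(round)) + uint_to_bytes(uint32(position // 256)))
		posBytes := make([]byte, 4)
		binary.LittleEndian.PutUint32(posBytes, uint32(position/256))
		source := sha256.Sum256(append(append(seed[:], round), posBytes...))
		byteV := source[(position%256)/8]
		if (byteV>>(position%8))%2 == 1 {
			index = flip
		}
	}
	return index
}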
View File

@@ -28,12 +28,11 @@ var CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{
// is active or not.
//
// Spec pseudocode definition:
//
// def is_active_validator(validator: Validator, epoch: Epoch) -> bool:
// """
// Check if ``validator`` is active.
// """
// return validator.activation_epoch <= epoch < validator.exit_epoch
func IsActiveValidator(validator *ethpb.Validator, epoch types.Epoch) bool {
return checkValidatorActiveStatus(validator.ActivationEpoch, validator.ExitEpoch, epoch)
}
@@ -51,12 +50,11 @@ func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch types.Epoch) b
// is slashable or not.
//
// Spec pseudocode definition:
//
// def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
// """
// Check if ``validator`` is slashable.
// """
// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
func IsSlashableValidator(activationEpoch, withdrawableEpoch types.Epoch, slashed bool, epoch types.Epoch) bool {
return checkValidatorSlashable(activationEpoch, withdrawableEpoch, slashed, epoch)
}
@@ -80,12 +78,11 @@ func checkValidatorSlashable(activationEpoch, withdrawableEpoch types.Epoch, sla
// need the active validator indices for some specific reason.
//
// Spec pseudocode definition:
//
// def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
// """
// Return the sequence of active validator indices at ``epoch``.
// """
// return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)]
func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, error) {
seed, err := Seed(s, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
@@ -189,12 +186,11 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
// the validator is eligible for activation and exit.
//
// Spec pseudocode definition:
//
// def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
// """
// Return the epoch during which validator activations and exits initiated in ``epoch`` take effect.
// """
// return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD)
func ActivationExitEpoch(epoch types.Epoch) types.Epoch {
return epoch + 1 + params.BeaconConfig().MaxSeedLookahead
}
@@ -203,13 +199,12 @@ func ActivationExitEpoch(epoch types.Epoch) types.Epoch {
// enter and exit validator pool for an epoch.
//
// Spec pseudocode definition:
//
// def get_validator_churn_limit(state: BeaconState) -> uint64:
// """
// Return the validator churn limit for the current epoch.
// """
// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient
if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit {
@@ -221,15 +216,14 @@ func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
// BeaconProposerIndex returns proposer index of a current slot.
//
// Spec pseudocode definition:
//
// def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
// """
// Return the beacon proposer index at the current slot.
// """
// epoch = get_current_epoch(state)
// seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot))
// indices = get_active_validator_indices(state, epoch)
// return compute_proposer_index(state, indices, seed)
func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (types.ValidatorIndex, error) {
e := time.CurrentEpoch(state)
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
@@ -280,22 +274,21 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
// ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer.
//
// Spec pseudocode definition:
//
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_BYTE = 2**8 - 1
// i = uint64(0)
// total = uint64(len(indices))
// while True:
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
// return candidate_index
// i += 1
func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []types.ValidatorIndex, seed [32]byte) (types.ValidatorIndex, error) {
length := uint64(len(activeIndices))
if length == 0 {
@@ -331,15 +324,14 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []types
// be placed into the activation queue.
//
// Spec pseudocode definition:
//
// def is_eligible_for_activation_queue(validator: Validator) -> bool:
// """
// Check if ``validator`` is eligible to be placed into the activation queue.
// """
// return (
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
// and validator.effective_balance == MAX_EFFECTIVE_BALANCE
// )
func IsEligibleForActivationQueue(validator *ethpb.Validator) bool {
return isEligibileForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
}
@@ -359,17 +351,16 @@ func isEligibileForActivationQueue(activationEligibilityEpoch types.Epoch, effec
// IsEligibleForActivation checks if the validator is eligible for activation.
//
// Spec pseudocode definition:
//
// def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool:
// """
// Check if ``validator`` is eligible for activation.
// """
// return (
// # Placement in queue is finalized
// validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch
// # Has not yet been activated
// and validator.activation_epoch == FAR_FUTURE_EPOCH
// )
func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator *ethpb.Validator) bool {
finalizedEpoch := state.FinalizedCheckpointEpoch()
return isEligibleForActivation(validator.ActivationEligibilityEpoch, validator.ActivationEpoch, finalizedEpoch)

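Most of the predicates above are single comparisons; the two that involve any arithmetic are the churn limit and the activation/exit delay. A sketch with assumed mainnet constants (Prysm reads them from params.BeaconConfig()):

const (
	minPerEpochChurnLimit = 4     // MIN_PER_EPOCH_CHURN_LIMIT (assumed mainnet value)
	churnLimitQuotient    = 65536 // CHURN_LIMIT_QUOTIENT (assumed mainnet value)
	maxSeedLookahead      = 4     // MAX_SEED_LOOKAHEAD (assumed mainnet value)
)

// validatorChurnLimitSketch: max(MIN_PER_EPOCH_CHURN_LIMIT, active // CHURN_LIMIT_QUOTIENT).
func validatorChurnLimitSketch(activeValidatorCount uint64) uint64 {
	limit := activeValidatorCount / churnLimitQuotient
	if limit < minPerEpochChurnLimit {
		return minPerEpochChurnLimit
	}
	return limit
}

// activationExitEpochSketch: activations and exits initiated in `epoch` take
// effect at epoch + 1 + MAX_SEED_LOOKAHEAD.
func activationExitEpochSketch(epoch uint64) uint64 {
	return epoch + 1 + maxSeedLookahead
}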
View File

@@ -25,37 +25,36 @@ import (
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#calculating-the-weak-subjectivity-period
//
// def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
// """
// Returns the weak subjectivity period for the current ``state``.
// This computation takes into account the effect of:
// - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
// - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
// A detailed calculation can be found at:
// https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
// """
// ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// N = len(get_active_validator_indices(state, get_current_epoch(state)))
// t = get_total_active_balance(state) // N // ETH_TO_GWEI
// T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI
// delta = get_validator_churn_limit(state)
// Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
// D = SAFETY_DECAY
// if T * (200 + 3 * D) < t * (200 + 12 * D):
// epochs_for_validator_set_churn = (
// N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
// )
// epochs_for_balance_top_ups = (
// N * (200 + 3 * D) // (600 * Delta)
// )
// ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
// else:
// ws_period += (
// 3 * N * D * t // (200 * Delta * (T - t))
// )
//
// return ws_period
func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) {
// Weak subjectivity period cannot be smaller than withdrawal delay.
wsp := uint64(cfg.MinValidatorWithdrawabilityDelay)
@@ -115,15 +114,14 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#checking-for-stale-weak-subjectivity-checkpoint
//
// def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
// # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
// assert ws_state.latest_block_header.state_root == ws_checkpoint.root
// assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch
//
// ws_period = compute_weak_subjectivity_period(ws_state)
// ws_state_epoch = compute_epoch_at_slot(ws_state.slot)
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
// return current_epoch <= ws_state_epoch + ws_period
func IsWithinWeakSubjectivityPeriod(
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [fieldparams.RootLength]byte, wsEpoch types.Epoch, cfg *params.BeaconChainConfig) (bool, error) {
// Make sure that incoming objects are not nil.

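Once the state-derived inputs (validator count, average and maximum effective balance, churn limit) are in hand, compute_weak_subjectivity_period above is pure integer arithmetic. A sketch that keeps the spec's variable names; every value is passed in, the caller must guarantee t < T on the else branch to avoid a division by zero, and the function name is made up:

// wsPeriodSketch mirrors the two branches of compute_weak_subjectivity_period.
// n: active validator count, t: average effective balance in ETH,
// bigT: MAX_EFFECTIVE_BALANCE in ETH, delta: churn limit per epoch,
// bigDelta: MAX_DEPOSITS * SLOTS_PER_EPOCH, d: SAFETY_DECAY.
func wsPeriodSketch(minWithdrawabilityDelay, n, t, bigT, delta, bigDelta, d uint64) uint64 {
	wsPeriod := minWithdrawabilityDelay
	if bigT*(200+3*d) < t*(200+12*d) {
		churnEpochs := n * (t*(200+12*d) - bigT*(200+3*d)) / (600 * delta * (2*t + bigT))
		topUpEpochs := n * (200 + 3*d) / (600 * bigDelta)
		if churnEpochs > topUpEpochs {
			wsPeriod += churnEpochs
		} else {
			wsPeriod += topUpEpochs
		}
	} else {
		wsPeriod += 3 * n * d * t / (200 * bigDelta * (bigT - t))
	}
	return wsPeriod
}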
View File

@@ -10,14 +10,13 @@ import (
// Domain returns the domain version for BLS private key to sign and verify.
//
// Spec pseudocode definition:
//
// def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain:
// """
// Return the signature domain (fork version concatenated with domain type) of a message.
// """
// epoch = get_current_epoch(state) if epoch is None else epoch
// fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
// return compute_domain(domain_type, fork_version, state.genesis_validators_root)
func Domain(fork *eth.Fork, epoch types.Epoch, domainType [bls.DomainByteLength]byte, genesisRoot []byte) ([]byte, error) {
if fork == nil {
return []byte{}, errors.New("nil fork or domain type")

View File

@@ -37,22 +37,21 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj f
// ComputeSigningRoot computes the root of the object by calculating the hash tree root of the signing data with the given domain.
//
// Spec pseudocode definition:
//
// def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
// """
// Return the signing root for the corresponding signing data.
// """
// return hash_tree_root(SigningData(
// object_root=hash_tree_root(ssz_object),
// domain=domain,
// ))
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
return SigningData(object.HashTreeRoot, domain)
return signingData(object.HashTreeRoot, domain)
}
// Computes the signing data by utilising the provided root function and then
// returning the signing data of the container object.
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
objRoot, err := rootFunc()
if err != nil {
return [32]byte{}, err
@@ -107,7 +106,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := SigningData(blkHdr.HashTreeRoot, domain)
root, err := signingData(blkHdr.HashTreeRoot, domain)
if err != nil {
return errors.Wrap(err, "could not compute signing root")
}
@@ -146,7 +145,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
// utilize custom block hashing function
root, err := SigningData(rootFunc, domain)
root, err := signingData(rootFunc, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
@@ -161,16 +160,15 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
// array as the fork version.
//
// def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain:
//
// """
// Return the domain for the ``domain_type`` and ``fork_version``.
// """
// if fork_version is None:
// fork_version = GENESIS_FORK_VERSION
// if genesis_validators_root is None:
// genesis_validators_root = Root() # all bytes zero by default
// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root)
// return Domain(domain_type + fork_data_root[:28])
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValidatorsRoot []byte) ([]byte, error) {
if forkVersion == nil {
forkVersion = params.BeaconConfig().GenesisForkVersion
@@ -197,20 +195,19 @@ func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
return b
}
// this returns the 32byte fork data root for the current_version and genesis_validators_root.
// this returns the 32byte fork data root for the ``current_version`` and ``genesis_validators_root``.
// This is used primarily in signature domains to avoid collisions across forks/chains.
//
// Spec pseudocode definition:
//
// def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root:
// """
// Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``.
// This is used primarily in signature domains to avoid collisions across forks/chains.
// """
// return hash_tree_root(ForkData(
// current_version=current_version,
// genesis_validators_root=genesis_validators_root,
// ))
func computeForkDataRoot(version, root []byte) ([32]byte, error) {
r, err := (&ethpb.ForkData{
CurrentVersion: version,
@@ -225,14 +222,13 @@ func computeForkDataRoot(version, root []byte) ([32]byte, error) {
// ComputeForkDigest returns the fork for the current version and genesis validators root
//
// Spec pseudocode definition:
//
// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:
// """
// Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``.
// This is a digest primarily used for domain separation on the p2p layer.
// 4-bytes suffices for practical separation of forks/chains.
// """
// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
func ComputeForkDigest(version, genesisValidatorsRoot []byte) ([4]byte, error) {
dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot)
if err != nil {

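The domain and digest helpers above all hang off one hash_tree_root(ForkData(...)). Because ForkData has exactly two fixed-size fields, its SSZ hash tree root reduces to a single SHA-256 over the 4-byte version right-padded to a 32-byte chunk followed by the 32-byte root; the sketch below relies on that reduction instead of a full SSZ library, so treat it as illustrative rather than a drop-in implementation:

import "crypto/sha256"

// forkDataRootSketch: hash_tree_root(ForkData(current_version, genesis_validators_root)),
// computed here as sha256(pad32(current_version) || genesis_validators_root).
func forkDataRootSketch(currentVersion [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
	var versionChunk [32]byte
	copy(versionChunk[:], currentVersion[:]) // right-pad the version to a full chunk
	return sha256.Sum256(append(versionChunk[:], genesisValidatorsRoot[:]...))
}

// computeDomainSketch: Domain(domain_type + fork_data_root[:28]), i.e. 4 + 28 = 32 bytes.
func computeDomainSketch(domainType, forkVersion [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
	root := forkDataRootSketch(forkVersion, genesisValidatorsRoot)
	var domain [32]byte
	copy(domain[:4], domainType[:])
	copy(domain[4:], root[:28])
	return domain
}

// computeForkDigestSketch: the first 4 bytes of the fork data root.
func computeForkDigestSketch(currentVersion [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
	root := forkDataRootSketch(currentVersion, genesisValidatorsRoot)
	var digest [4]byte
	copy(digest[:], root[:4])
	return digest
}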
View File

@@ -12,12 +12,11 @@ import (
// the slot number stored in beacon state.
//
// Spec pseudocode definition:
//
// def get_current_epoch(state: BeaconState) -> Epoch:
// """
// Return the current epoch.
// """
// return compute_epoch_at_slot(state.slot)
func CurrentEpoch(state state.ReadOnlyBeaconState) types.Epoch {
return slots.ToEpoch(state.Slot())
}
@@ -27,13 +26,12 @@ func CurrentEpoch(state state.ReadOnlyBeaconState) types.Epoch {
// underflow condition.
//
// Spec pseudocode definition:
//
// def get_previous_epoch(state: BeaconState) -> Epoch:
// """
// Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
// """
// current_epoch = get_current_epoch(state)
// return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1)
func PrevEpoch(state state.ReadOnlyBeaconState) types.Epoch {
currentEpoch := CurrentEpoch(state)
if currentEpoch == 0 {
@@ -72,21 +70,17 @@ func CanUpgradeToBellatrix(slot types.Slot) bool {
return epochStart && bellatrixEpoch
}
// CanUpgradeToCapella returns true if the input `slot` can upgrade to Capella.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == CAPELLA_FORK_EPOCH
func CanUpgradeToCapella(slot types.Slot) bool {
func CanUpgradeToEip4844(slot types.Slot) bool {
epochStart := slots.IsEpochStart(slot)
capellaEpoch := slots.ToEpoch(slot) == params.BeaconConfig().CapellaForkEpoch
return epochStart && capellaEpoch
e := slots.ToEpoch(slot) == params.BeaconConfig().Eip4844ForkEpoch
return epochStart && e
}
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
// Spec pseudocode definition:
//
// If (state.slot + 1) % SLOTS_PER_EPOCH == 0:
func CanProcessEpoch(state state.ReadOnlyBeaconState) bool {
return (state.Slot()+1)%params.BeaconConfig().SlotsPerEpoch == 0
}

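The renamed CanUpgradeToEip4844 keeps the same shape as the other fork gates: the slot must be the first slot of an epoch, and that epoch must equal the configured fork epoch. A sketch with the fork epoch passed in and SLOTS_PER_EPOCH assumed (Prysm reads Eip4844ForkEpoch and SlotsPerEpoch from params.BeaconConfig()):

const slotsPerEpoch = 32 // SLOTS_PER_EPOCH (assumed mainnet value)

// canUpgradeAtSketch is the generic fork-boundary test behind CanUpgradeToBellatrix,
// CanUpgradeToCapella and the new CanUpgradeToEip4844 above.
func canUpgradeAtSketch(slot, forkEpoch uint64) bool {
	isEpochStart := slot%slotsPerEpoch == 0
	return isEpochStart && slot/slotsPerEpoch == forkEpoch
}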
View File

@@ -263,38 +263,3 @@ func TestAltairCompatible(t *testing.T) {
})
}
}
func TestCanUpgradeToCapella(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.CapellaForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot types.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not capella epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "capella epoch",
slot: types.Slot(params.BeaconConfig().CapellaForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToCapella(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToCapella() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -25,7 +25,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/capella:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",
@@ -37,6 +36,7 @@ go_library(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blobs:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -257,43 +257,3 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState,
beaconState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
return beaconState, blk
}
func createFullCapellaBlockWithOperations(t *testing.T) (state.BeaconState,
*ethpb.SignedBeaconBlockCapella) {
_, bellatrixBlk := createFullBellatrixBlockWithOperations(t)
blk := &ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
Slot: bellatrixBlk.Block.Slot,
ProposerIndex: bellatrixBlk.Block.ProposerIndex,
ParentRoot: bellatrixBlk.Block.ParentRoot,
StateRoot: bellatrixBlk.Block.StateRoot,
Body: &ethpb.BeaconBlockBodyCapella{
RandaoReveal: bellatrixBlk.Block.Body.RandaoReveal,
Eth1Data: bellatrixBlk.Block.Body.Eth1Data,
Graffiti: bellatrixBlk.Block.Body.Graffiti,
ProposerSlashings: bellatrixBlk.Block.Body.ProposerSlashings,
AttesterSlashings: bellatrixBlk.Block.Body.AttesterSlashings,
Attestations: bellatrixBlk.Block.Body.Attestations,
Deposits: bellatrixBlk.Block.Body.Deposits,
VoluntaryExits: bellatrixBlk.Block.Body.VoluntaryExits,
SyncAggregate: bellatrixBlk.Block.Body.SyncAggregate,
ExecutionPayload: &enginev1.ExecutionPayloadCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
ExtraData: make([]byte, 0),
},
},
},
Signature: nil,
}
beaconState, _ := util.DeterministicGenesisStateCapella(t, 32)
return beaconState, blk
}

Some files were not shown because too many files have changed in this diff.