Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: ignore_rid...lighter-pr (103 commits)
| SHA1 |
|---|
| 05d977a8fc |
| 099a01a63b |
| 3f7c54c563 |
| f6e641f3fc |
| 423089aa0a |
| 59d4f61ebb |
| e8cada76b6 |
| 7781eb60f4 |
| 396b8bf970 |
| d5107942a1 |
| bd4a520013 |
| 4a4dbba067 |
| a0ff1351a0 |
| 42309121da |
| 7e6fd5fd8b |
| d984210baa |
| 31c72672d7 |
| 8c1e180dd1 |
| 886d76fe7c |
| a602acf492 |
| 1b6547de6a |
| 88685bb3bd |
| 2319b7d4bd |
| 82b2840d68 |
| cf221d0f4c |
| 0956e3a657 |
| 351ed1c511 |
| 9809f5ac77 |
| cff5e2b5fe |
| dd15f9e0cc |
| 1c9ded4684 |
| d4cc6fcf4a |
| 49c16f1a71 |
| e70b606e78 |
| 0e8b37c317 |
| e80db9554d |
| d0bf03e863 |
| b7e0819f00 |
| 7d64104003 |
| b1e8a9ea3d |
| cc1028ca3c |
| 233f4d99a2 |
| a068f3877e |
| 856907d760 |
| c6801df05a |
| bc7b15b04e |
| eb713d1177 |
| 844b2c6602 |
| 9efaa832cd |
| e9d26c61d7 |
| 374d77f437 |
| 1f6d1d1852 |
| 0eff83cb9d |
| ffe2f6b732 |
| d57bca97a5 |
| b45a6664be |
| c56abfb840 |
| d70f477b1e |
| db096488b0 |
| 344e68b81b |
| 1962cca69e |
| 4a374435c0 |
| 0fde4a22e1 |
| 62ecc0d177 |
| 97dfec84f6 |
| 53bc96844e |
| ddcf0c18dc |
| 45a2746d0e |
| 09f3df309d |
| 96df81d5c5 |
| c47c52152b |
| 4cbe144a6c |
| 52b9b65adb |
| ea59b1ec71 |
| 175c484c44 |
| 8aaab86987 |
| 381116a3e8 |
| 3d61fd0436 |
| b19d24c581 |
| 8387088a52 |
| ce7452c97a |
| 5e56b5fdd7 |
| bfaba378f6 |
| 3bd116db16 |
| 7d2ddaee43 |
| 122a7782ff |
| 9b1b6f9be6 |
| 0eb08a4f96 |
| bdfa06ed65 |
| a94f2b93e3 |
| 4c47756aed |
| 440841d565 |
| ff99616833 |
| f537a98fcd |
| cee38660c7 |
| 481d77bfde |
| 590317553c |
| 68b7d1009e |
| b5b8825cc8 |
| 382b8b23c2 |
| 40a3ebab91 |
| 83af9a5694 |
| 6a45323ab7 |
.github/actions/gomodtidy/Dockerfile (vendored, 2 changes)

```diff
@@ -1,4 +1,4 @@
-FROM golang:alpine
+FROM golang:1.21-alpine

 COPY entrypoint.sh /entrypoint.sh
```
.github/workflows/fuzz.yml (vendored, 4 changes)

```diff
@@ -16,7 +16,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.20'
+          go-version: '1.21.5'
       - id: list
         uses: shogo82148/actions-go-fuzz/list@v0
         with:
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.20'
+          go-version: '1.21.5'
       - uses: shogo82148/actions-go-fuzz/run@v0
         with:
          packages: ${{ matrix.package }}
```
.github/workflows/go.yml (vendored, 14 changes)

```diff
@@ -5,7 +5,7 @@ on:
     branches: [ master ]
   pull_request:
     branches: [ '*' ]
-  merge_group:
+  merge_group:
+    types: [checks_requested]

 jobs:
@@ -28,10 +28,10 @@ jobs:
     steps:
       - name: Checkout
        uses: actions/checkout@v2
-      - name: Set up Go 1.20
+      - name: Set up Go 1.21
        uses: actions/setup-go@v3
        with:
-          go-version: '1.20'
+          go-version: '1.21.5'
       - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
@@ -45,16 +45,16 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v2

-      - name: Set up Go 1.20
+      - name: Set up Go 1.21
        uses: actions/setup-go@v3
        with:
-          go-version: '1.20'
+          go-version: '1.21.5'
        id: go

      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
-          version: v1.52.2
+          version: v1.55.2
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
@@ -64,7 +64,7 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
-          go-version: '1.20'
+          go-version: '1.21.5'
        id: go

      - name: Check out code into the Go module directory
```
.golangci.yml

```diff
@@ -6,21 +6,83 @@ run:
     - proto
     - tools/analyzers
   timeout: 10m
-  go: '1.19'
+  go: '1.21.5'

 linters:
-  disable-all: true
-  enable:
-    - gofmt
-    - goimports
-    - unused
-    - errcheck
-    - gosimple
-    - gocognit
-    - dupword
-    - nilerr
-    - whitespace
-    - misspell
+  enable-all: true
+  disable:
+    # Deprecated linters:
+    - deadcode
+    - exhaustivestruct
+    - golint
+    - govet
+    - ifshort
+    - interfacer
+    - maligned
+    - nosnakecase
+    - scopelint
+    - structcheck
+    - varcheck
+
+    # Disabled for now:
+    - asasalint
+    - bodyclose
+    - containedctx
+    - contextcheck
+    - cyclop
+    - depguard
+    - dogsled
+    - dupl
+    - durationcheck
+    - errorlint
+    - exhaustive
+    - exhaustruct
+    - forbidigo
+    - forcetypeassert
+    - funlen
+    - gci
+    - gochecknoglobals
+    - gochecknoinits
+    - goconst
+    - gocritic
+    - gocyclo
+    - godot
+    - godox
+    - goerr113
+    - gofumpt
+    - gomnd
+    - gomoddirectives
+    - inamedparam
+    - interfacebloat
+    - ireturn
+    - lll
+    - maintidx
+    - makezero
+    - musttag
+    - nakedret
+    - nestif
+    - nilnil
+    - nlreturn
+    - noctx
+    - nolintlint
+    - nonamedreturns
+    - nosprintfhostport
+    - perfsprint
+    - prealloc
+    - predeclared
+    - promlinter
+    - protogetter
+    - revive
+    - staticcheck
+    - stylecheck
+    - tagalign
+    - tagliatelle
+    - thelper
+    - unparam
+    - varnamelen
+    - wastedassign
+    - wrapcheck
+    - wsl

 linters-settings:
   gocognit:
```
BUILD.bazel (111 changes)

```diff
@@ -4,6 +4,7 @@ load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
 load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
 load("@io_bazel_rules_go//go:def.bzl", "nogo")
 load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
+load("@prysm//tools/nogo_config:def.bzl", "nogo_config_exclude")

 prefix = "github.com/prysmaticlabs/prysm"

@@ -82,9 +83,115 @@ workspace_binary(
     cmd = "@com_github_golang_lint//golint",
 )

+STATICCHECK_ANALYZERS = [
+    # Enabled static checks. See https://staticcheck.dev/docs/checks/
+    # Please keep this list sorted. Don't be a bad person by inserting stuff randomly.
+    "sa1000",
+    "sa1001",
+    "sa1002",
+    "sa1003",
+    "sa1004",
+    "sa1005",
+    "sa1006",
+    "sa1007",
+    "sa1008",
+    "sa1010",
+    "sa1011",
+    "sa1012",
+    "sa1013",
+    "sa1014",
+    "sa1015",
+    "sa1016",
+    "sa1017",
+    "sa1018",
+    # "sa1019",  # TODO: Fix all uses of deprecated things.
+    "sa1020",
+    "sa1021",
+    "sa1023",
+    "sa1024",
+    "sa1025",
+    "sa1026",
+    "sa1027",
+    "sa1028",
+    "sa1029",
+    "sa1030",
+    "sa2000",
+    "sa2001",
+    "sa2002",
+    "sa2003",
+    "sa3000",
+    "sa3001",
+    "sa4000",
+    "sa4001",
+    "sa4003",
+    "sa4004",
+    "sa4005",
+    "sa4006",
+    "sa4008",
+    "sa4009",
+    "sa4010",
+    "sa4011",
+    "sa4012",
+    "sa4013",
+    "sa4014",
+    "sa4015",
+    "sa4016",
+    "sa4017",
+    "sa4018",
+    "sa4019",
+    "sa4020",
+    "sa4021",
+    "sa4022",
+    "sa4023",
+    "sa4024",
+    "sa4025",
+    "sa4026",
+    "sa4027",
+    "sa4028",
+    "sa4029",
+    "sa4030",
+    "sa4031",
+    "sa4032",
+    "sa5000",
+    "sa5001",
+    "sa5002",
+    "sa5003",
+    "sa5004",
+    "sa5005",
+    "sa5007",
+    "sa5008",
+    "sa5009",
+    "sa5010",
+    "sa5011",
+    "sa5012",
+    "sa6000",
+    "sa6001",
+    "sa6002",
+    "sa6003",
+    "sa6005",
+    "sa6006",
+    "sa9001",
+    "sa9002",
+    #"sa9003",  # Doesn't build. See https://github.com/dominikh/go-tools/pull/1483
+    "sa9004",
+    "sa9005",
+    "sa9006",
+    "sa9007",
+    "sa9008",
+]
+
+nogo_config_exclude(
+    name = "nogo_config_with_excludes",
+    checks = [sa.upper() for sa in STATICCHECK_ANALYZERS],
+    exclude_files = [
+        "external/.*",
+    ],
+    input = "nogo_config.json",
+)
+
 nogo(
     name = "nogo",
-    config = "nogo_config.json",
+    config = ":nogo_config_with_excludes",
     visibility = ["//visibility:public"],
     deps = [
         "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
@@ -136,7 +243,7 @@ nogo(
         "@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
         "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
         ],
-    }),
+    }) + ["@co_honnef_go_tools//staticcheck/%s:go_default_library" % c for c in STATICCHECK_ANALYZERS],
 )

 config_setting(
```
README.md (2 changes)

```diff
@@ -7,7 +7,7 @@
 [](https://discord.gg/prysmaticlabs)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

-This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
+This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

 ### Getting Started
```
WORKSPACE (124 changes)

```diff
@@ -96,11 +96,22 @@ http_archive(
 )

 http_archive(
-    name = "io_bazel_rules_docker",
-    sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
-    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
+    name = "rules_distroless",
+    sha256 = "e64f06e452cd153aeab81f752ccf4642955b3af319e64f7bc7a7c9252f76b10e",
+    strip_prefix = "rules_distroless-f5e678217b57ce3ad2f1c0204bd4e9d416255773",
+    url = "https://github.com/GoogleContainerTools/rules_distroless/archive/f5e678217b57ce3ad2f1c0204bd4e9d416255773.tar.gz",
 )

+load("@rules_distroless//distroless:dependencies.bzl", "rules_distroless_dependencies")
+
+rules_distroless_dependencies()
+
+load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
+
+aspect_bazel_lib_dependencies()
+
+aspect_bazel_lib_register_toolchains()
+
 http_archive(
     name = "rules_oci",
     sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
@@ -126,10 +137,10 @@ http_archive(
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
+    sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
     ],
 )

@@ -148,67 +159,16 @@ git_repository(
     # gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
 )

-load(
-    "@io_bazel_rules_docker//repositories:repositories.bzl",
-    container_repositories = "repositories",
-)
-
-container_repositories()
-
-load(
-    "@io_bazel_rules_docker//container:container.bzl",
-    "container_pull",
-)
-
-# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
-container_pull(
-    name = "cc_image_base_amd64",
-    digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
-    registry = "gcr.io",
-    repository = "distroless/cc-debian11",
-)
-
-# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
-container_pull(
-    name = "cc_debug_image_base_amd64",
-    digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
-    registry = "gcr.io",
-    repository = "distroless/cc-debian11",
-)
-
-# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
-container_pull(
-    name = "go_image_base_amd64",
-    digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
-    registry = "gcr.io",
-    repository = "distroless/base-debian11",
-)
-
-# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
-container_pull(
-    name = "go_debug_image_base_amd64",
-    digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
-    registry = "gcr.io",
-    repository = "distroless/base-debian11",
-)
-
-container_pull(
-    name = "alpine_cc_linux_amd64",
-    digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
-    registry = "index.docker.io",
-    repository = "pinglamb/alpine-glibc",
-)
-
 load("@rules_oci//oci:pull.bzl", "oci_pull")

 # A multi-arch base image
 oci_pull(
     name = "linux_debian11_multiarch_base",  # Debian bullseye
-    digest = "sha256:9b8e0854865dcaf49470b4ec305df45957020fbcf17b71eeb50ffd3bc5bf885d",  # 2023-05-17
+    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b",  # 2023-12-12
     image = "gcr.io/distroless/cc-debian11",
     platforms = [
         "linux/amd64",
-        "linux/arm64",
+        "linux/arm64/v8",
     ],
     reproducible = True,
 )
@@ -222,7 +182,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.20.10",
+    go_version = "1.21.5",
     nogo = "@//:nogo",
 )

@@ -263,7 +223,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.4.0-beta.3"
+consensus_spec_version = "v1.4.0-beta.5"

 bls_test_version = "v0.1.1"

@@ -279,7 +239,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "67ae5b8fc368853da23d4297e480a4b7f4722fb970d1c7e2b6a5b7faef9cb907",
+    sha256 = "9017ffff84d64a7c4c9e6ff9f421f9479f71d3b463b738f54e02158dbb4f50f0",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -295,7 +255,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "82474f29fff4abd09fb1e71bafa98827e2573cf0ad02cf119610961831dc3bb5",
+    sha256 = "f08711682553fe7c9362f1400ed8c56b2fa9576df08581fcad4c508ba8ad4788",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -311,7 +271,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "60e4b6eb6c341daab7ee5614a8e3f28567247c504c593b951bfe919622c8ef8f",
+    sha256 = "7ea3189e3879f2ac62467cbf2945c00b6c94d30cdefb2d645c630b1018c50e10",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -326,7 +286,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "fdab9756c93a250219ff6a10d5a9faee1e2e6878a14508410409e307362c6991",
+    sha256 = "4119992a2efc79e5cb2bdc07ed08c0b1fa32332cbd0d88e6467f34938df97026",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -362,6 +322,22 @@ filegroup(
     url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
 )

+http_archive(
+    name = "goerli_testnet",
+    build_file_content = """
+filegroup(
+    name = "configs",
+    srcs = [
+        "prater/config.yaml",
+    ],
+    visibility = ["//visibility:public"],
+)
+""",
+    sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
+    strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
+    url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
+)
+
 http_archive(
     name = "holesky_testnet",
     build_file_content = """
@@ -417,24 +393,6 @@ load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")

 e2e_deps()

-load(
-    "@io_bazel_rules_docker//go:image.bzl",
-    _go_image_repos = "repositories",
-)
-
-# Golang images
-# This is using gcr.io/distroless/base
-_go_image_repos()
-
-# CC images
-# This is using gcr.io/distroless/base
-load(
-    "@io_bazel_rules_docker//cc:image.bzl",
-    _cc_image_repos = "repositories",
-)
-
-_cc_image_repos()
-
 load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

 gometalinter_dependencies()
```
```diff
@@ -11,6 +11,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//api/client:go_default_library",
+        "//api/server:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/rpc/eth/beacon:go_default_library",
         "//beacon-chain/rpc/eth/config:go_default_library",
```
```diff
@@ -16,6 +16,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v4/api/client"
+	"github.com/prysmaticlabs/prysm/v4/api/server"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
@@ -305,7 +306,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shar
 	if resp.StatusCode != http.StatusOK {
 		decoder := json.NewDecoder(resp.Body)
 		decoder.DisallowUnknownFields()
-		errorJson := &shared.IndexedVerificationFailureError{}
+		errorJson := &server.IndexedVerificationFailureError{}
 		if err := decoder.Decode(errorJson); err != nil {
 			return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
 		}
```
```diff
@@ -268,11 +268,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
 	}
 	vs := make([]*shared.SignedValidatorRegistration, len(svr))
 	for i := 0; i < len(svr); i++ {
-		svrJson, err := shared.SignedValidatorRegistrationFromConsensus(svr[i])
-		if err != nil {
-			return errors.Wrap(err, fmt.Sprintf("failed to encode to SignedValidatorRegistration at index %d", i))
-		}
-		vs[i] = svrJson
+		vs[i] = shared.SignedValidatorRegistrationFromConsensus(svr[i])
 	}
 	body, err := json.Marshal(vs)
 	if err != nil {
@@ -427,26 +423,26 @@ func non200Err(response *http.Response) error {
 	}
 	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
 	switch response.StatusCode {
-	case 204:
+	case http.StatusNoContent:
 		log.WithError(ErrNoContent).Debug(msg)
 		return ErrNoContent
-	case 400:
-		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
-			return errors.Wrap(jsonErr, "unable to read response body")
-		}
+	case http.StatusBadRequest:
+		log.WithError(ErrBadRequest).Debug(msg)
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
 		return errors.Wrap(ErrBadRequest, errMessage.Message)
-	case 404:
-		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
-			return errors.Wrap(jsonErr, "unable to read response body")
-		}
+	case http.StatusNotFound:
+		log.WithError(ErrNotFound).Debug(msg)
 		return errors.Wrap(ErrNotFound, errMessage.Message)
-	case 500:
-		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
-			return errors.Wrap(jsonErr, "unable to read response body")
-		}
-		return errors.Wrap(ErrNotFound, errMessage.Message)
+	case http.StatusInternalServerError:
+		log.WithError(ErrNotOK).Debug(msg)
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
+		return errors.Wrap(ErrNotOK, errMessage.Message)
 	default:
 		log.WithError(ErrNotOK).Debug(msg)
```
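The patched switch replaces magic status codes with net/http constants and, for the 500 branch, fixes the sentinel from ErrNotFound to ErrNotOK. The value of wrapped sentinels is on the caller side. Below is a minimal, self-contained sketch; the names mirror the diff but are redeclared locally, so this is illustrative rather than Prysm's actual package:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Sentinel errors, mirroring the pattern in the diff above (declarations assumed).
var (
	ErrNoContent  = errors.New("no content (204)")
	ErrBadRequest = errors.New("bad request (400)")
	ErrNotFound   = errors.New("not found (404)")
	ErrNotOK      = errors.New("did not receive 2xx response")
)

// non200Err maps a status code to a wrapped sentinel, as the patched switch does.
func non200Err(statusCode int) error {
	switch statusCode {
	case http.StatusNoContent:
		return ErrNoContent
	case http.StatusBadRequest:
		return fmt.Errorf("%w: invalid payload", ErrBadRequest)
	case http.StatusNotFound:
		return fmt.Errorf("%w: unknown endpoint", ErrNotFound)
	default:
		return fmt.Errorf("%w: code=%d", ErrNotOK, statusCode)
	}
}

func main() {
	err := non200Err(http.StatusNotFound)
	// Callers branch on the failure class instead of parsing message strings.
	fmt.Println(errors.Is(err, ErrNotFound)) // true
	fmt.Println(errors.Is(err, ErrNotOK))    // false
}
```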
```diff
@@ -275,7 +275,24 @@ func TestClient_GetHeader(t *testing.T) {
 			require.Equal(t, len(kcgCommitments[i]) == 48, true)
 		}
 	})
+	t.Run("deneb, too many kzg commitments", func(t *testing.T) {
+		hc := &http.Client{
+			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+				require.Equal(t, expectedPath, r.URL.Path)
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDenebTooManyBlobs)),
+					Request:    r.Clone(ctx),
+				}, nil
+			}),
+		}
+		c := &Client{
+			hc:      hc,
+			baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+		}
+		_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
+		require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
+	})
 	t.Run("unsupported version", func(t *testing.T) {
 		hc := &http.Client{
 			Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -412,7 +429,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
 		require.ErrorContains(t, "not a bellatrix payload", err)
 	})
 	t.Run("not blinded", func(t *testing.T) {
-		sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
+		sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})
 		require.NoError(t, err)
 		_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
 		require.ErrorIs(t, err, errNotBlinded)
@@ -705,7 +722,7 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
 }

 func testSignedBlindedBeaconBlockDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockDeneb {
-	basebytes, err := shared.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
+	basebytes, err := bytesutil.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
 	if err != nil {
 		log.Error(err)
 	}
```
```diff
@@ -357,6 +357,45 @@ func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCape
 	}, nil
 }

+func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, error) {
+	bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
+	if err != nil {
+		return ExecutionPayloadDeneb{}, err
+	}
+	txs := make([]hexutil.Bytes, len(payload.Transactions))
+	for i := range payload.Transactions {
+		txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
+	}
+	withdrawals := make([]Withdrawal, len(payload.Withdrawals))
+	for i, w := range payload.Withdrawals {
+		withdrawals[i] = Withdrawal{
+			Index:          Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
+			ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
+			Address:        bytesutil.SafeCopyBytes(w.Address),
+			Amount:         Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
+		}
+	}
+	return ExecutionPayloadDeneb{
+		ParentHash:    bytesutil.SafeCopyBytes(payload.ParentHash),
+		FeeRecipient:  bytesutil.SafeCopyBytes(payload.FeeRecipient),
+		StateRoot:     bytesutil.SafeCopyBytes(payload.StateRoot),
+		ReceiptsRoot:  bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
+		LogsBloom:     bytesutil.SafeCopyBytes(payload.LogsBloom),
+		PrevRandao:    bytesutil.SafeCopyBytes(payload.PrevRandao),
+		BlockNumber:   Uint64String(payload.BlockNumber),
+		GasLimit:      Uint64String(payload.GasLimit),
+		GasUsed:       Uint64String(payload.GasUsed),
+		Timestamp:     Uint64String(payload.Timestamp),
+		ExtraData:     bytesutil.SafeCopyBytes(payload.ExtraData),
+		BaseFeePerGas: bFee,
+		BlockHash:     bytesutil.SafeCopyBytes(payload.BlockHash),
+		Transactions:  txs,
+		Withdrawals:   withdrawals,
+		BlobGasUsed:   Uint64String(payload.BlobGasUsed),
+		ExcessBlobGas: Uint64String(payload.ExcessBlobGas),
+	}, nil
+}
+
 // ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
 type ExecHeaderResponseCapella struct {
 	Data struct {
@@ -869,6 +908,9 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
 	if err != nil {
 		return nil, err
 	}
+	if len(bb.BlobKzgCommitments) > fieldparams.MaxBlobsPerBlock {
+		return nil, fmt.Errorf("too many blob commitments: %d", len(bb.BlobKzgCommitments))
+	}
 	kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
 	for i, commit := range bb.BlobKzgCommitments {
 		if len(commit) != fieldparams.BLSPubkeyLength {
@@ -1021,6 +1063,16 @@ type BlobsBundle struct {

 // ToProto returns a BlobsBundle Proto.
 func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
+	if len(b.Blobs) > fieldparams.MaxBlobCommitmentsPerBlock {
+		return nil, fmt.Errorf("blobs length %d is more than max %d", len(b.Blobs), fieldparams.MaxBlobCommitmentsPerBlock)
+	}
+	if len(b.Commitments) != len(b.Blobs) {
+		return nil, fmt.Errorf("commitments length %d does not equal blobs length %d", len(b.Commitments), len(b.Blobs))
+	}
+	if len(b.Proofs) != len(b.Blobs) {
+		return nil, fmt.Errorf("proofs length %d does not equal blobs length %d", len(b.Proofs), len(b.Blobs))
+	}
+
 	commitments := make([][]byte, len(b.Commitments))
 	for i := range b.Commitments {
 		if len(b.Commitments[i]) != fieldparams.BLSPubkeyLength {
@@ -1035,9 +1087,6 @@ func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
 		}
 		proofs[i] = bytesutil.SafeCopyBytes(b.Proofs[i])
 	}
-	if len(b.Blobs) > fieldparams.MaxBlobsPerBlock {
-		return nil, fmt.Errorf("blobs length %d is more than max %d", len(b.Blobs), fieldparams.MaxBlobsPerBlock)
-	}
 	blobs := make([][]byte, len(b.Blobs))
 	for i := range b.Blobs {
 		if len(b.Blobs[i]) != fieldparams.BlobLength {
@@ -1052,6 +1101,28 @@ func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
 	}, nil
 }

+// FromBundleProto converts the proto bundle type to the builder
+// type.
+func FromBundleProto(bundle *v1.BlobsBundle) *BlobsBundle {
+	commitments := make([]hexutil.Bytes, len(bundle.KzgCommitments))
+	for i := range bundle.KzgCommitments {
+		commitments[i] = bytesutil.SafeCopyBytes(bundle.KzgCommitments[i])
+	}
+	proofs := make([]hexutil.Bytes, len(bundle.Proofs))
+	for i := range bundle.Proofs {
+		proofs[i] = bytesutil.SafeCopyBytes(bundle.Proofs[i])
+	}
+	blobs := make([]hexutil.Bytes, len(bundle.Blobs))
+	for i := range bundle.Blobs {
+		blobs[i] = bytesutil.SafeCopyBytes(bundle.Blobs[i])
+	}
+	return &BlobsBundle{
+		Commitments: commitments,
+		Proofs:      proofs,
+		Blobs:       blobs,
+	}
+}
+
 // ToProto returns ExecutionPayloadDeneb Proto and BlobsBundle Proto separately.
 func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.BlobsBundle, error) {
 	if r.Data == nil {
```
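FromBundleProto and ToProto form a round trip whose invariants (a bounded blob count, and commitments and proofs matching blobs one-to-one) are enforced on the proto-bound direction. A hedged sketch of exercising that validation, assuming the prysm v4 module paths that appear in this diff:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
)

func main() {
	pb := &v1.BlobsBundle{
		KzgCommitments: [][]byte{make([]byte, fieldparams.BLSPubkeyLength)}, // 48-byte commitment
		Proofs:         [][]byte{make([]byte, fieldparams.BLSPubkeyLength)}, // 48-byte proof
		Blobs:          [][]byte{make([]byte, fieldparams.BlobLength)},
	}
	b := builder.FromBundleProto(pb) // proto -> builder type; bytes are copied
	if _, err := b.ToProto(); err != nil {
		fmt.Println("unexpected:", err) // round trip should validate cleanly
	}
	b.Proofs = nil // break the proofs/blobs invariant on purpose
	_, err := b.ToProto()
	fmt.Println(err) // proofs length 0 does not equal blobs length 1
}
```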
```diff
@@ -38,8 +38,7 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
 		},
 		Signature: make([]byte, 96),
 	}
-	a, err := shared.SignedValidatorRegistrationFromConsensus(svr)
-	require.NoError(t, err)
+	a := shared.SignedValidatorRegistrationFromConsensus(svr)
 	je, err := json.Marshal(a)
 	require.NoError(t, err)
 	// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
@@ -210,6 +209,39 @@ var testExampleHeaderResponseUnknownVersion = `{
 	}
 }`

+var testExampleHeaderResponseDenebTooManyBlobs = `{
+  "version": "deneb",
+  "data": {
+    "message": {
+      "header": {
+        "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
+        "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+        "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "block_number": "1",
+        "gas_limit": "1",
+        "gas_used": "1",
+        "timestamp": "1",
+        "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
+        "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+        "blob_gas_used": "1",
+        "excess_blob_gas": "2"
+      },
+      "blob_kzg_commitments": [
+        "","","","","","",""
+      ],
+      "value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
+      "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
+    },
+    "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
+  }
+}`
+
 func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
 	hr := &ExecHeaderResponse{}
 	require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
@@ -629,6 +661,151 @@ var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
   }
 }`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))

+var testExampleExecutionPayloadDenebTooManyBlobs = fmt.Sprintf(`{
+  "version": "deneb",
+  "data": {
+    "execution_payload":{
+      "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
+      "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "block_number": "1",
+      "gas_limit": "1",
+      "gas_used": "1",
+      "timestamp": "1",
+      "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
+      "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "transactions": [
+        "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
+      ],
+      "withdrawals": [
+        {
+          "index": "1",
+          "validator_index": "1",
+          "address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
+          "amount": "1"
+        }
+      ],
+      "blob_gas_used": "2",
+      "excess_blob_gas": "3"
+    },
+    "blobs_bundle": {
+      "commitments": [
+        "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
+      ],
+      "proofs": [
+        "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
+      ],
+      "blobs": %s
+    }
+  }
+}`, beyondMaxEmptyBlobs())
+
+func beyondMaxEmptyBlobs() string {
+	moreThanMax := fieldparams.MaxBlobCommitmentsPerBlock + 2
+	blobs := make([]string, moreThanMax)
+	b, err := json.Marshal(blobs)
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+var testExampleExecutionPayloadDenebDifferentCommitmentCount = fmt.Sprintf(`{
+  "version": "deneb",
+  "data": {
+    "execution_payload":{
+      "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
+      "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "block_number": "1",
+      "gas_limit": "1",
+      "gas_used": "1",
+      "timestamp": "1",
+      "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
+      "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "transactions": [
+        "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
+      ],
+      "withdrawals": [
+        {
+          "index": "1",
+          "validator_index": "1",
+          "address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
+          "amount": "1"
+        }
+      ],
+      "blob_gas_used": "2",
+      "excess_blob_gas": "3"
+    },
+    "blobs_bundle": {
+      "commitments": [
+        "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f",
+        "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
+      ],
+      "proofs": [
+        "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
+      ],
+      "blobs": [
+        "%s"
+      ]
+    }
+  }
+}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
+
+var testExampleExecutionPayloadDenebDifferentProofCount = fmt.Sprintf(`{
+  "version": "deneb",
+  "data": {
+    "execution_payload":{
+      "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
+      "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "block_number": "1",
+      "gas_limit": "1",
+      "gas_used": "1",
+      "timestamp": "1",
+      "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
+      "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
+      "transactions": [
+        "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
+      ],
+      "withdrawals": [
+        {
+          "index": "1",
+          "validator_index": "1",
+          "address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
+          "amount": "1"
+        }
+      ],
+      "blob_gas_used": "2",
+      "excess_blob_gas": "3"
+    },
+    "blobs_bundle": {
+      "commitments": [
+        "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
+      ],
+      "proofs": [
+        "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a",
+        "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
+      ],
+      "blobs": [
+        "%s"
+      ]
+    }
+  }
+}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
+
 func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
 	epr := &ExecPayloadResponse{}
 	require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
@@ -1005,7 +1182,6 @@ func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
 		},
 	}
 	require.DeepEqual(t, expected, p)
-
 }

 func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
@@ -1084,7 +1260,27 @@ func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
 	}

 	require.DeepEqual(t, blobsBundle, expectedBlobs)
 }
+
+func TestExecutionPayloadResponseDenebToProtoInvalidBlobCount(t *testing.T) {
+	hr := &ExecPayloadResponseDeneb{}
+	require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDenebTooManyBlobs), hr))
+	_, _, err := hr.ToProto()
+	require.ErrorContains(t, fmt.Sprintf("blobs length %d is more than max %d", fieldparams.MaxBlobCommitmentsPerBlock+2, fieldparams.MaxBlobCommitmentsPerBlock), err)
+}
+
+func TestExecutionPayloadResponseDenebToProtoDifferentCommitmentCount(t *testing.T) {
+	hr := &ExecPayloadResponseDeneb{}
+	require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDenebDifferentCommitmentCount), hr))
+	_, _, err := hr.ToProto()
+	require.ErrorContains(t, "commitments length 2 does not equal blobs length 1", err)
+}
+
+func TestExecutionPayloadResponseDenebToProtoDifferentProofCount(t *testing.T) {
+	hr := &ExecPayloadResponseDeneb{}
+	require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDenebDifferentProofCount), hr))
+	_, _, err := hr.ToProto()
+	require.ErrorContains(t, "proofs length 2 does not equal blobs length 1", err)
+}

 func pbEth1Data() *eth.Eth1Data {
```
```diff
@@ -32,7 +32,7 @@ func Non200Err(response *http.Response) error {
 	}
 	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
 	switch response.StatusCode {
-	case 404:
+	case http.StatusNotFound:
 		return errors.Wrap(ErrNotFound, msg)
 	default:
 		return errors.Wrap(ErrNotOK, msg)
```
```diff
@@ -14,11 +14,11 @@ go_library(
         "//validator:__subpackages__",
     ],
     deps = [
+        "//api/server:go_default_library",
         "//runtime:go_default_library",
         "@com_github_gorilla_mux//:go_default_library",
         "@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
-        "@com_github_rs_cors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//connectivity:go_default_library",
```
```diff
@@ -11,8 +11,8 @@ import (
 	"github.com/gorilla/mux"
 	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/api/server"
 	"github.com/prysmaticlabs/prysm/v4/runtime"
-	"github.com/rs/cors"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
@@ -104,7 +104,7 @@ func (g *Gateway) Start() {
 		}
 	}

-	corsMux := g.corsMiddleware(g.cfg.router)
+	corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)

 	if g.cfg.muxHandler != nil {
 		g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -158,17 +158,6 @@ func (g *Gateway) Stop() error {
 	return nil
 }

-func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
-	c := cors.New(cors.Options{
-		AllowedOrigins:   g.cfg.allowedOrigins,
-		AllowedMethods:   []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
-		AllowCredentials: true,
-		MaxAge:           600,
-		AllowedHeaders:   []string{"*"},
-	})
-	return c.Handler(h)
-}
-
 // dial the gRPC server.
 func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
 	switch network {
```
```diff
@@ -3,16 +3,22 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
+        "error.go",
         "middleware.go",
         "util.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
     visibility = ["//visibility:public"],
+    deps = [
+        "@com_github_gorilla_mux//:go_default_library",
+        "@com_github_rs_cors//:go_default_library",
+    ],
 )

 go_test(
     name = "go_default_test",
     srcs = [
+        "error_test.go",
         "middleware_test.go",
         "util_test.go",
     ],
```
api/server/error.go (new file, 45 lines)

```go
package server

import (
	"fmt"
	"strings"
)

// DecodeError represents an error resulting from trying to decode an HTTP request.
// It tracks the full field name for which decoding failed.
type DecodeError struct {
	path []string
	err  error
}

// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
// The current field that failed decoding must be passed in.
func NewDecodeError(err error, field string) *DecodeError {
	de, ok := err.(*DecodeError)
	if ok {
		return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
	}
	return &DecodeError{path: []string{field}, err: err}
}

// Error returns the formatted error message which contains the full field name and the actual decoding error.
func (e *DecodeError) Error() string {
	return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}

// IndexedVerificationFailureError wraps a collection of verification failures.
type IndexedVerificationFailureError struct {
	Message  string                        `json:"message"`
	Code     int                           `json:"code"`
	Failures []*IndexedVerificationFailure `json:"failures"`
}

func (e *IndexedVerificationFailureError) StatusCode() int {
	return e.Code
}

// IndexedVerificationFailure represents an issue when verifying a single indexed object e.g. an item in an array.
type IndexedVerificationFailure struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}
```
api/server/error_test.go (new file, 16 lines)

```go
package server

import (
	"errors"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/testing/assert"
)

func TestDecodeError(t *testing.T) {
	e := errors.New("not a number")
	de := NewDecodeError(e, "Z")
	de = NewDecodeError(de, "Y")
	de = NewDecodeError(de, "X")
	assert.Equal(t, "could not decode X.Y.Z: not a number", de.Error())
}
```
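NewDecodeError is designed to be re-wrapped at each decoding layer so the final message names the full field path, as the test above shows with X.Y.Z. A small illustrative sketch of the same idea in a request-decoding helper; the withdrawalJSON type and decodeAmount function are invented for this example:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/prysmaticlabs/prysm/v4/api/server"
)

// withdrawalJSON is a hypothetical request type for this example.
type withdrawalJSON struct {
	Amount string `json:"amount"`
}

// decodeAmount parses the string field, tagging any failure with the field name.
func decodeAmount(w *withdrawalJSON) (uint64, error) {
	a, err := strconv.ParseUint(w.Amount, 10, 64)
	if err != nil {
		return 0, server.NewDecodeError(err, "Amount")
	}
	return a, nil
}

func main() {
	_, err := decodeAmount(&withdrawalJSON{Amount: "not-a-number"})
	// A caller one level up wraps again, extending the reported path.
	err = server.NewDecodeError(err, "Withdrawal")
	fmt.Println(err) // could not decode Withdrawal.Amount: strconv.ParseUint: parsing "not-a-number": invalid syntax
}
```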
```diff
@@ -2,8 +2,12 @@ package server

 import (
 	"net/http"
+
+	"github.com/gorilla/mux"
+	"github.com/rs/cors"
 )

 // NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
 func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		query := r.URL.Query()
@@ -13,3 +17,16 @@ func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
 		next.ServeHTTP(w, r)
 	})
 }
+
+// CorsHandler sets the cors settings on api endpoints
+func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
+	c := cors.New(cors.Options{
+		AllowedOrigins:   allowOrigins,
+		AllowedMethods:   []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
+		AllowCredentials: true,
+		MaxAge:           600,
+		AllowedHeaders:   []string{"*"},
+	})
+
+	return c.Handler
+}
```
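Because CorsHandler returns a mux.MiddlewareFunc, it can be attached with Router.Use as well as invoked directly with .Middleware(...), which is what let the gateway drop its private corsMiddleware above. A minimal sketch; the route and allowed origin are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/v4/api/server"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// CorsHandler returns a mux.MiddlewareFunc, so it plugs straight into Use.
	r.Use(server.CorsHandler([]string{"http://localhost:4200"}))
	log.Fatal(http.ListenAndServe(":8080", r))
}
```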
```diff
@@ -26,6 +26,7 @@ go_library(
         "receive_blob.go",
         "receive_block.go",
         "service.go",
+        "tracked_proposer.go",
         "weak_subjectivity_checks.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain",
@@ -52,7 +53,6 @@ go_library(
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/db/filters:go_default_library",
-        "//beacon-chain/db/kv:go_default_library",
         "//beacon-chain/execution:go_default_library",
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
```
```diff
@@ -75,6 +75,7 @@ type HeadFetcher interface {
 	HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
 	HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
 	ChainHeads() ([][32]byte, []primitives.Slot)
+	TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
 	HeadSyncCommitteeFetcher
 	HeadDomainFetcher
 }
```
```diff
@@ -22,6 +22,13 @@ func (s *Service) GetProposerHead() [32]byte {
 	return s.cfg.ForkChoiceStore.GetProposerHead()
 }

+// ShouldOverrideFCU returns the corresponding value from forkchoice
+func (s *Service) ShouldOverrideFCU() bool {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
+}
+
 // SetForkChoiceGenesisTime sets the genesis time in Forkchoice
 func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
 	s.cfg.ForkChoiceStore.Lock()
```
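ShouldOverrideFCU is a read-side accessor: it holds the forkchoice read lock for exactly the duration of the delegated call, while SetForkChoiceGenesisTime takes the write lock. The pattern in isolation, as a generic sketch rather than Prysm code:

```go
package main

import (
	"fmt"
	"sync"
)

// store stands in for the forkchoice store: callers must synchronize
// themselves, so accessors wrap each read in RLock/RUnlock.
type store struct {
	mu       sync.RWMutex
	override bool
}

// ShouldOverrideFCU reads under the read lock, allowing concurrent readers.
func (s *store) ShouldOverrideFCU() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.override
}

func main() {
	s := &store{override: true}
	fmt.Println(s.ShouldOverrideFCU()) // true
}
```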
@@ -7,11 +7,11 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
@@ -32,21 +32,18 @@ const blobCommitmentVersionKZG uint8 = 0x01
|
||||
|
||||
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
|
||||
|
||||
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
|
||||
type notifyForkchoiceUpdateArg struct {
|
||||
headState state.BeaconState
|
||||
headRoot [32]byte
|
||||
headBlock interfaces.ReadOnlyBeaconBlock
|
||||
}
|
||||
|
||||
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
|
||||
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
|
||||
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
|
||||
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg) (*enginev1.PayloadIDBytes, error) {
|
||||
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*enginev1.PayloadIDBytes, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
|
||||
defer span.End()
|
||||
|
||||
headBlk := arg.headBlock
|
||||
if arg.headBlock.IsNil() {
|
||||
log.Error("Head block is nil")
|
||||
return nil, nil
|
||||
}
|
||||
headBlk := arg.headBlock.Block()
|
||||
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
|
||||
log.Error("Head block is nil")
|
||||
return nil, nil
|
||||
@@ -72,11 +69,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
SafeBlockHash: justifiedHash[:],
|
||||
FinalizedBlockHash: finalizedHash[:],
|
||||
}
|
||||
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
|
||||
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
|
||||
if arg.attributes == nil {
|
||||
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
|
||||
}
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case execution.ErrAcceptedSyncingPayloadStatus:
|
||||
@@ -122,10 +118,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return nil, nil
|
||||
}
|
||||
pid, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headRoot: r,
|
||||
headBlock: b.Block(),
|
||||
pid, err := s.notifyForkchoiceUpdate(ctx, &fcuConfig{
|
||||
headState: st,
|
||||
headRoot: r,
|
||||
headBlock: b,
|
||||
attributes: arg.attributes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err // Returning err because it's recursive here.
|
||||
@@ -153,7 +150,9 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
log.WithError(err).Error("Could not set head root to valid")
|
||||
return nil, nil
|
||||
}
|
||||
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
|
||||
// If the forkchoice update call has an attribute, update the payload ID cache.
|
||||
hasAttr := arg.attributes != nil && !arg.attributes.IsEmpty()
|
||||
nextSlot := s.CurrentSlot() + 1
|
||||
if hasAttr && payloadID != nil {
|
||||
var pId [8]byte
|
||||
copy(pId[:], payloadID[:])
|
||||
@@ -162,7 +161,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
"headSlot": headBlk.Slot(),
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
|
||||
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
|
||||
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||
@@ -277,56 +276,50 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [

 // getPayloadAttributes returns the payload attributes for the given state and slot.
 // The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
-func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
+func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
 	emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
-	// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
-	proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
-	if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
-		return false, emptyAttri, 0
-	}
-
-	// Get previous randao.
+	// If it is an epoch boundary then process slots to get the right
+	// shuffling before checking if the proposer is tracked. Otherwise
+	// perform this check before. This is cheap as the NSC has already been updated.
+	var val cache.TrackedValidator
+	var ok bool
+	e := slots.ToEpoch(slot)
+	stateEpoch := slots.ToEpoch(st.Slot())
+	if e == stateEpoch {
+		val, ok = s.trackedProposer(st, slot)
+		if !ok {
+			return emptyAttri
+		}
+	}
+	st = st.Copy()
 	if slot > st.Slot() {
 		var err error
 		st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
 		if err != nil {
 			log.WithError(err).Error("Could not process slots to get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 	}
+	if e > stateEpoch {
+		emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
+		val, ok = s.trackedProposer(st, slot)
+		if !ok {
+			return emptyAttri
+		}
+	}
+	// Get previous randao.
 	prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
 	if err != nil {
 		log.WithError(err).Error("Could not get randao mix to get payload attribute")
-		return false, emptyAttri, 0
-	}
-
-	// Get fee recipient.
-	feeRecipient := params.BeaconConfig().DefaultFeeRecipient
-	recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
-	switch {
-	case errors.Is(err, kv.ErrNotFoundFeeRecipient):
-		if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
-			logrus.WithFields(logrus.Fields{
-				"validatorIndex": proposerID,
-				"burnAddress":    params.BeaconConfig().EthBurnAddressHex,
-			}).Warn("Fee recipient is currently using the burn address, " +
-				"you will not be rewarded transaction fees on this setting. " +
-				"Please set a different eth address as the fee recipient. " +
-				"Please refer to our documentation for instructions")
-		}
-	case err != nil:
-		log.WithError(err).Error("Could not get fee recipient to get payload attribute")
-		return false, emptyAttri, 0
-	default:
-		feeRecipient = recipient
+		return emptyAttri
 	}

 	// Get timestamp.
 	t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
 	if err != nil {
 		log.WithError(err).Error("Could not get timestamp to get payload attribute")
-		return false, emptyAttri, 0
+		return emptyAttri
 	}

 	var attr payloadattribute.Attributer
@@ -335,51 +328,51 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
 		withdrawals, err := st.ExpectedWithdrawals()
 		if err != nil {
 			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 		attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
 			Timestamp:             uint64(t.Unix()),
 			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: feeRecipient.Bytes(),
+			SuggestedFeeRecipient: val.FeeRecipient[:],
 			Withdrawals:           withdrawals,
 			ParentBeaconBlockRoot: headRoot,
 		})
 		if err != nil {
 			log.WithError(err).Error("Could not get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 	case version.Capella:
 		withdrawals, err := st.ExpectedWithdrawals()
 		if err != nil {
 			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 		attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
 			Timestamp:             uint64(t.Unix()),
 			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: feeRecipient.Bytes(),
+			SuggestedFeeRecipient: val.FeeRecipient[:],
 			Withdrawals:           withdrawals,
 		})
 		if err != nil {
 			log.WithError(err).Error("Could not get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 	case version.Bellatrix:
 		attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
 			Timestamp:             uint64(t.Unix()),
 			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: feeRecipient.Bytes(),
+			SuggestedFeeRecipient: val.FeeRecipient[:],
 		})
 		if err != nil {
 			log.WithError(err).Error("Could not get payload attribute")
-			return false, emptyAttri, 0
+			return emptyAttri
 		}
 	default:
 		log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
-		return false, emptyAttri, 0
+		return emptyAttri
 	}

-	return true, attr, proposerID
+	return attr
 }

 // removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
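getPayloadAttribute now returns only an Attributer, with "empty" standing in for the old (false, _, 0) triple, and the tracked-proposer lookup ordering depends on whether the proposal slot crosses an epoch boundary. A small sketch of that ordering decision, under simplified assumptions (fixed 32-slot epochs, placeholder names):

package main

import "fmt"

const slotsPerEpoch = 32

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// shouldCheckBeforeAdvance mirrors the logic above: if the proposal slot is in
// the same epoch as the state, the shuffling is already known and the
// tracked-proposer check can run before the (more expensive) slot processing;
// across an epoch boundary it has to run after.
func shouldCheckBeforeAdvance(stateSlot, proposalSlot uint64) bool {
	return toEpoch(stateSlot) == toEpoch(proposalSlot)
}

func main() {
	fmt.Println(shouldCheckBeforeAdvance(30, 31)) // true: same epoch
	fmt.Println(shouldCheckBeforeAdvance(31, 32)) // false: next-epoch shuffling needed first
}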
@@ -26,11 +26,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/testing/assert"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	"github.com/prysmaticlabs/prysm/v4/testing/util"
-	logTest "github.com/sirupsen/logrus/hooks/test"
 )

 func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

 	altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
@@ -57,11 +56,14 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

-	b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
-		Body: &ethpb.BeaconBlockBodyBellatrix{
-			ExecutionPayload: &v1.ExecutionPayload{},
-		},
-	})
+	sb := &ethpb.SignedBeaconBlockBellatrix{
+		Block: &ethpb.BeaconBlockBellatrix{
+			Body: &ethpb.BeaconBlockBodyBellatrix{
+				ExecutionPayload: &v1.ExecutionPayload{},
+			},
+		},
+	}
+	b, err := consensusblocks.NewSignedBeaconBlock(sb)
 	require.NoError(t, err)

 	pid := &v1.PayloadIDBytes{1}
@@ -73,20 +75,20 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
 	// Intentionally generate a bad state such that `hash_tree_root` fails during `process_slot`
 	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
 	require.NoError(t, err)
-	arg := &notifyForkchoiceUpdateArg{
+	arg := &fcuConfig{
 		headState: s,
 		headRoot:  [32]byte{},
 		headBlock: b,
 	}

-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(1, 0, [8]byte{}, [32]byte{})
+	service.cfg.PayloadIDCache.Set(1, [32]byte{}, [8]byte{})
 	got, err := service.notifyForkchoiceUpdate(ctx, arg)
 	require.NoError(t, err)
 	require.DeepEqual(t, got, pid) // We still get a payload ID even though the state is bad. This means it returns until the end.
 }

 func Test_NotifyForkchoiceUpdate(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

 	altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
@@ -114,7 +116,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {

 	tests := []struct {
 		name          string
-		blk           interfaces.ReadOnlyBeaconBlock
+		blk           interfaces.ReadOnlySignedBeaconBlock
 		headRoot      [32]byte
 		finalizedRoot [32]byte
 		justifiedRoot [32]byte
@@ -123,24 +125,24 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 	}{
 		{
 			name: "phase0 block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
 				require.NoError(t, err)
 				return b
 			}(),
 		},
 		{
 			name: "altair block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
 				require.NoError(t, err)
 				return b
 			}(),
 		},
 		{
 			name: "not execution block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
 					Body: &ethpb.BeaconBlockBodyBellatrix{
 						ExecutionPayload: &v1.ExecutionPayload{
 							ParentHash: make([]byte, fieldparams.RootLength),
@@ -153,19 +155,19 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 							BlockHash: make([]byte, fieldparams.RootLength),
 						},
 					},
-				})
+				}})
 				require.NoError(t, err)
 				return b
 			}(),
 		},
 		{
 			name: "happy case: finalized root is altair block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
 					Body: &ethpb.BeaconBlockBodyBellatrix{
 						ExecutionPayload: &v1.ExecutionPayload{},
 					},
-				})
+				}})
 				require.NoError(t, err)
 				return b
 			}(),
@@ -174,12 +176,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 		},
 		{
 			name: "happy case: finalized root is bellatrix block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
 					Body: &ethpb.BeaconBlockBodyBellatrix{
 						ExecutionPayload: &v1.ExecutionPayload{},
 					},
-				})
+				}})
 				require.NoError(t, err)
 				return b
 			}(),
@@ -188,12 +190,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 		},
 		{
 			name: "forkchoice updated with optimistic block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
 					Body: &ethpb.BeaconBlockBodyBellatrix{
 						ExecutionPayload: &v1.ExecutionPayload{},
 					},
-				})
+				}})
 				require.NoError(t, err)
 				return b
 			}(),
@@ -203,12 +205,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 		},
 		{
 			name: "forkchoice updated with invalid block",
-			blk: func() interfaces.ReadOnlyBeaconBlock {
-				b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
+			blk: func() interfaces.ReadOnlySignedBeaconBlock {
+				b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
 					Body: &ethpb.BeaconBlockBodyBellatrix{
 						ExecutionPayload: &v1.ExecutionPayload{},
 					},
-				})
+				}})
 				require.NoError(t, err)
 				return b
 			}(),
@@ -226,7 +228,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 			st, _ := util.DeterministicGenesisState(t, 1)
 			require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
 			require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
-			arg := &notifyForkchoiceUpdateArg{
+			arg := &fcuConfig{
 				headState: st,
 				headRoot:  tt.headRoot,
 				headBlock: tt.blk,
@@ -246,7 +248,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 }

 func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

 	// Prepare blocks
@@ -306,9 +308,9 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {

 	require.NoError(t, beaconDB.SaveState(ctx, st, bra))
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
-	a := &notifyForkchoiceUpdateArg{
+	a := &fcuConfig{
 		headState: st,
-		headBlock: wbd.Block(),
+		headBlock: wbd,
 		headRoot:  brd,
 	}
 	_, err = service.notifyForkchoiceUpdate(ctx, a)
@@ -334,7 +336,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
 // 3. the blockchain package calls fcu to obtain heads G -> F -> D.

 func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

 	// Prepare blocks
@@ -443,9 +445,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {

 	require.NoError(t, beaconDB.SaveState(ctx, st, bra))
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
-	a := &notifyForkchoiceUpdateArg{
+	a := &fcuConfig{
 		headState: st,
-		headBlock: wbg.Block(),
+		headBlock: wbg,
 		headRoot:  brg,
 	}
 	_, err = service.notifyForkchoiceUpdate(ctx, a)
@@ -467,7 +469,7 @@ func Test_NotifyNewPayload(t *testing.T) {
 	cfg := params.BeaconConfig()
 	cfg.TerminalTotalDifficulty = "2"
 	params.OverrideBeaconConfig(cfg)
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, fcs := tr.ctx, tr.fcs

 	phase0State, _ := util.DeterministicGenesisState(t, 1)
@@ -709,7 +711,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
 	cfg.TerminalTotalDifficulty = "2"
 	params.OverrideBeaconConfig(cfg)

-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -777,83 +779,70 @@ func Test_reportInvalidBlock(t *testing.T) {
 }

 func Test_GetPayloadAttribute(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-	hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
-	require.Equal(t, false, hasPayload)
-	require.Equal(t, primitives.ValidatorIndex(0), vId)
+	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
+	require.Equal(t, true, attr.IsEmpty())

+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
 	// Cache hit, advance state, no fee recipient
-	suggestedVid := primitives.ValidatorIndex(1)
 	slot := primitives.Slot(1)
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hook := logTest.NewGlobal()
-	hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
-	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

 	// Cache hit, advance state, has fee recipient
 	suggestedAddr := common.HexToAddress("123")
-	require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
 }

 func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
 	hook := logTest.NewGlobal()
 	resetCfg := features.InitWithReset(&features.Flags{
 		PrepareAllPayloads: true,
 	})
 	defer resetCfg()

-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-	hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, primitives.ValidatorIndex(0), vId)
+	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
 	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
 }
 func Test_GetPayloadAttributeV2(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	st, _ := util.DeterministicGenesisStateCapella(t, 1)
-	hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
-	require.Equal(t, false, hasPayload)
-	require.Equal(t, primitives.ValidatorIndex(0), vId)
+	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
+	require.Equal(t, true, attr.IsEmpty())

 	// Cache hit, advance state, no fee recipient
-	suggestedVid := primitives.ValidatorIndex(1)
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
 	slot := primitives.Slot(1)
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hook := logTest.NewGlobal()
-	hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
-	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
 	a, err := attr.Withdrawals()
 	require.NoError(t, err)
 	require.Equal(t, 0, len(a))

 	// Cache hit, advance state, has fee recipient
 	suggestedAddr := common.HexToAddress("123")
-	require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
 	a, err = attr.Withdrawals()
 	require.NoError(t, err)
@@ -861,35 +850,30 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
 }

 func Test_GetPayloadAttributeDeneb(t *testing.T) {
-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	st, _ := util.DeterministicGenesisStateDeneb(t, 1)
-	hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
-	require.Equal(t, false, hasPayload)
-	require.Equal(t, primitives.ValidatorIndex(0), vId)
+	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
+	require.Equal(t, true, attr.IsEmpty())

 	// Cache hit, advance state, no fee recipient
-	suggestedVid := primitives.ValidatorIndex(1)
 	slot := primitives.Slot(1)
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hook := logTest.NewGlobal()
-	hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
-	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
 	a, err := attr.Withdrawals()
 	require.NoError(t, err)
 	require.Equal(t, 0, len(a))

 	// Cache hit, advance state, has fee recipient
 	suggestedAddr := common.HexToAddress("123")
-	require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
-	hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
-	require.Equal(t, true, hasPayload)
-	require.Equal(t, suggestedVid, vId)
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
 	require.Equal(t, false, attr.IsEmpty())
 	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
 	a, err = attr.Withdrawals()
 	require.NoError(t, err)
@@ -8,20 +8,15 @@ import (
 	"github.com/pkg/errors"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
-	"github.com/prysmaticlabs/prysm/v4/config/features"
-	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v4/time/slots"
-	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )

-func (s *Service) isNewProposer(slot primitives.Slot) bool {
-	_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
-	return ok || features.Get().PrepareAllPayloads
-}
-
 func (s *Service) isNewHead(r [32]byte) bool {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
@@ -49,48 +44,34 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
 	return headState, newHeadBlock, nil
 }

+type fcuConfig struct {
+	headState     state.BeaconState
+	headBlock     interfaces.ReadOnlySignedBeaconBlock
+	headRoot      [32]byte
+	proposingSlot primitives.Slot
+	attributes    payloadattribute.Attributer
+}
+
 // forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
-// it returns true if the new head is updated
-func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
+func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
 	_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
 	defer span.End()
 	// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
 	ctx = trace.NewContext(s.ctx, span)

-	isNewHead := s.isNewHead(newHeadRoot)
-	if !isNewHead {
-		return false, nil
-	}
-	isNewProposer := s.isNewProposer(proposingSlot)
-	if isNewProposer && !features.Get().DisableReorgLateBlocks {
-		if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
-			return false, nil
-		}
-	}
-	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
+	_, err := s.notifyForkchoiceUpdate(ctx, args)
 	if err != nil {
-		log.WithError(err).Error("Could not get forkchoice update argument")
-		return false, nil
+		return errors.Wrap(err, "could not notify forkchoice update")
 	}

-	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
-		headState: headState,
-		headRoot:  newHeadRoot,
-		headBlock: headBlock.Block(),
-	})
-	if err != nil {
-		return false, errors.Wrap(err, "could not notify forkchoice update")
-	}
-
-	if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
+	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
 		log.WithError(err).Error("could not save head")
 	}

 	// Only need to prune attestations from pool if the head has changed.
-	if err := s.pruneAttsFromPool(headBlock); err != nil {
+	if err := s.pruneAttsFromPool(args.headBlock); err != nil {
 		log.WithError(err).Error("could not prune attestations from pool")
 	}
-	return true, nil
+	return nil
 }

 // shouldOverrideFCU checks whether the incoming block is still subject to being
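The positional (root, slot) signature is replaced by a single fcuConfig parameter object, so call sites can attach optional payload attributes without another signature change. A compact sketch of the same pattern with placeholder types (not the real service code):

package main

import "fmt"

// headUpdate is a trimmed-down stand-in for fcuConfig: head data plus an
// optional attributes field, where nil means "nothing to build".
type headUpdate struct {
	headRoot      [32]byte
	proposingSlot uint64
	attributes    *string
}

func notify(u *headUpdate) error {
	if u.attributes == nil {
		fmt.Printf("FCU for head %x at slot %d without attributes\n", u.headRoot[:4], u.proposingSlot)
		return nil
	}
	fmt.Printf("FCU for head %x at slot %d with attributes %q\n", u.headRoot[:4], u.proposingSlot, *u.attributes)
	return nil
}

func main() {
	attrs := "fee recipient + randao"
	_ = notify(&headUpdate{headRoot: [32]byte{1}, proposingSlot: 5})
	_ = notify(&headUpdate{headRoot: [32]byte{1}, proposingSlot: 5, attributes: &attrs})
}

The design choice also moves the "is this a new head / should we override" decisions out of the wrapper and up to the callers, which is why the function now returns only an error.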
@@ -17,15 +17,6 @@ import (
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )

-func TestService_isNewProposer(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-	service := setupBeaconChain(t, beaconDB)
-	require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))
-
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
-	require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
-}
-
 func TestService_isNewHead(t *testing.T) {
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
@@ -67,33 +58,14 @@ func TestService_getHeadStateAndBlock(t *testing.T) {
 }

 func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
 	hook := logTest.NewGlobal()
 	ctx := context.Background()
 	opts := testServiceOptsWithDB(t)

 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
-	_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
-	require.NoError(t, err)
-	hookErr := "could not notify forkchoice update"
-	invalidStateErr := "could not get state summary: could not find block in DB"
-	require.LogsDoNotContain(t, hook, invalidStateErr)
-	require.LogsDoNotContain(t, hook, hookErr)
-	gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
-	require.NoError(t, err)
-	require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
-	_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
-	require.NoError(t, err)
-	require.LogsContain(t, hook, invalidStateErr)
+	service.cfg.PayloadIDCache = cache.NewPayloadIDCache()
+	service.cfg.TrackedValidatorsCache = cache.NewTrackedValidatorsCache()

 	hook.Reset()
 	service.head = &head{
 		root:  [32]byte{'a'},
 		block: nil, /* should not panic if notify head uses correct head */
 	}

 	// Block in Cache
 	b := util.NewBeaconBlock()
 	b.Block.Slot = 2
 	wsb, err := blocks.NewSignedBeaconBlock(b)
@@ -107,13 +79,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
 		block: wsb,
 		state: st,
 	}
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
-	_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
-	require.NoError(t, err)
-	require.LogsDoNotContain(t, hook, invalidStateErr)
-	require.LogsDoNotContain(t, hook, hookErr)
-
 	// Block in DB
+	service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
 	b = util.NewBeaconBlock()
 	b.Block.Slot = 3
 	util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
@@ -125,25 +91,22 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
 		block: wsb,
 		state: st,
 	}
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
-	_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
-	require.NoError(t, err)
-	require.LogsDoNotContain(t, hook, invalidStateErr)
-	require.LogsDoNotContain(t, hook, hookErr)
-	vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
-	require.Equal(t, true, has)
-	require.Equal(t, primitives.ValidatorIndex(1), vId)
-	require.Equal(t, [8]byte{1}, payloadID)
+	service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
+	args := &fcuConfig{
+		headState:     st,
+		headRoot:      r1,
+		headBlock:     wsb,
+		proposingSlot: service.CurrentSlot() + 1,
+	}
+	require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))

-	// Test zero headRoot returns immediately.
-	headRoot := service.headRoot()
-	_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
-	require.NoError(t, err)
-	require.Equal(t, service.headRoot(), headRoot)
+	payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
+	require.Equal(t, true, has)
+	require.Equal(t, primitives.PayloadID{1}, payloadID)
 }
 func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
-	service, tr := minimalTestService(t)
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

 	altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
@@ -182,10 +145,14 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
 	service.head.root = r
 	service.head.block = sb
 	service.head.state = st
-	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
-	_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
-	require.NoError(t, err)
+	service.cfg.PayloadIDCache.Set(service.CurrentSlot()+1, [32]byte{} /* root */, [8]byte{})
+	args := &fcuConfig{
+		headState:     st,
+		headBlock:     sb,
+		headRoot:      r,
+		proposingSlot: service.CurrentSlot() + 1,
+	}
+	require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
 }

 func TestShouldOverrideFCU(t *testing.T) {
@@ -276,7 +276,7 @@ func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
 // It does a full copy on head state for immutability.
 // This is a lock free version.
 func (s *Service) headState(ctx context.Context) state.BeaconState {
-	ctx, span := trace.StartSpan(ctx, "blockChain.headState")
+	_, span := trace.StartSpan(ctx, "blockChain.headState")
 	defer span.End()

 	return s.head.state.Copy()
@@ -286,7 +286,7 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
 // It does not perform a copy of the head state.
 // This is a lock free version.
 func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
-	ctx, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
+	_, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
 	defer span.End()

 	return s.head.state
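These two hunks replace a shadowed, never-used context with the blank identifier. A self-contained illustration of the same cleanup, with startSpan as a stand-in for trace.StartSpan:

package main

import (
	"context"
	"fmt"
)

type ctxKey string

// startSpan is a stand-in for trace.StartSpan: it derives a child context and
// returns it alongside a span-like value.
func startSpan(ctx context.Context, name string) (context.Context, string) {
	return context.WithValue(ctx, ctxKey("span"), name), name
}

func main() {
	ctx := context.Background()
	// When the derived context is never passed onward, assign it to the blank
	// identifier instead of shadowing ctx, as the diff above does.
	_, span := startSpan(ctx, "blockChain.headState")
	fmt.Println("span:", span)
}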
@@ -73,7 +73,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
 	return nil
 }

-func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
+func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
 	startTime, err := slots.ToTime(genesisTime, block.Slot())
 	if err != nil {
 		return err
@@ -93,7 +93,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
 		"parentRoot":                fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
 		"version":                   version.String(block.Version()),
 		"sinceSlotStartTime":        prysmTime.Now().Sub(startTime),
-		"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
+		"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
 		"deposits":                  len(block.Body().Deposits()),
 	}
 	log.WithFields(lf).Debug("Synced new block")
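The new daWaitedTime parameter lets the log report processing time net of the data-availability wait. A minimal, self-contained sketch of the measurement pattern (the sleeps stand in for real work):

package main

import (
	"fmt"
	"time"
)

func main() {
	received := time.Now()

	daStart := time.Now()
	time.Sleep(30 * time.Millisecond) // stand-in for waiting on blob sidecars
	daWaited := time.Since(daStart)

	time.Sleep(10 * time.Millisecond) // stand-in for the chain service's own work

	total := time.Since(received)
	reported := total - daWaited
	fmt.Printf("total=%v daWaited=%v reported=%v\n", total, daWaited, reported)
}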
@@ -69,10 +69,18 @@ func WithDepositCache(c cache.DepositCache) Option {
 	}
 }

-// WithProposerIdsCache for proposer id cache.
-func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
+// WithPayloadIDCache for payload ID cache.
+func WithPayloadIDCache(c *cache.PayloadIDCache) Option {
 	return func(s *Service) error {
-		s.cfg.ProposerSlotIndexCache = c
+		s.cfg.PayloadIDCache = c
 		return nil
 	}
 }

+// WithTrackedValidatorsCache for tracked validators cache.
+func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
+	return func(s *Service) error {
+		s.cfg.TrackedValidatorsCache = c
+		return nil
+	}
+}
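Both new constructors follow the functional-options pattern the blockchain service already uses. A compact, generic sketch of the pattern (placeholder types, not the real service config):

package main

import "fmt"

type config struct {
	payloadIDCache string
	validatorCache string
}

type service struct{ cfg *config }

type option func(*service) error

func withPayloadIDCache(c string) option {
	return func(s *service) error { s.cfg.payloadIDCache = c; return nil }
}

func withTrackedValidatorsCache(c string) option {
	return func(s *service) error { s.cfg.validatorCache = c; return nil }
}

func newService(opts ...option) (*service, error) {
	s := &service{cfg: &config{}} // starts empty; options supply the caches
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := newService(withPayloadIDCache("payload-ids"), withTrackedValidatorsCache("tracked-validators"))
	fmt.Println(s.cfg.payloadIDCache, s.cfg.validatorCache)
}

Note that a later hunk changes NewService to start with an empty config, so tests and callers now inject these caches through the options rather than relying on a default.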
@@ -165,7 +173,7 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
 	}
 }

-// WithClockSychronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
+// WithClockSynchronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
 // the genesis timestamp is known (ClockWaiter) or which determine the genesis timestamp (ClockSetter).
 func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
 	return func(s *Service) error {
@@ -235,14 +235,14 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
 	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))

-	s2, err := service.getAttPreState(ctx, cp2)
+	_, err = service.getAttPreState(ctx, cp2)
 	require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)

 	st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

-	s2, err = service.getAttPreState(ctx, cp2)
+	s2, err := service.getAttPreState(ctx, cp2)
 	require.NoError(t, err)

 	assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
@@ -73,6 +73,9 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
 	if err != nil {
 		log.WithError(err).Warn("Could not update head")
 	}
-	newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
+	proposingSlot := s.CurrentSlot() + 1
+	var fcuArgs *fcuConfig
 	if blockRoot != headRoot {
 		receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
 		if err != nil {
@@ -88,21 +91,63 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
 			"headRoot":   fmt.Sprintf("%#x", headRoot),
 			"headWeight": headWeight,
 		}).Debug("Head block is not the received block")
+		headState, headBlock, err := s.getStateAndBlock(ctx, headRoot)
+		if err != nil {
+			log.WithError(err).Error("Could not get forkchoice update argument")
+			return nil
+		}
+		fcuArgs = &fcuConfig{
+			headState:     headState,
+			headBlock:     headBlock,
+			headRoot:      headRoot,
+			proposingSlot: proposingSlot,
+		}
+	} else {
+		fcuArgs = &fcuConfig{
+			headState:     postState,
+			headBlock:     signed,
+			headRoot:      headRoot,
+			proposingSlot: proposingSlot,
+		}
 	}
+	newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

-	// verify conditions for FCU, notifies FCU, and saves the new head.
-	// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
-	if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
-		return err
+	isEarly := slots.WithinVotingWindow(uint64(s.genesisTime.Unix()))
+	shouldOverrideFCU := false
+	slot := postState.Slot()
+	if s.isNewHead(headRoot) {
+		// if the block is early send FCU without any payload attributes
+		if isEarly {
+			if err := s.forkchoiceUpdateWithExecution(ctx, fcuArgs); err != nil {
+				return err
+			}
+		} else {
+			// if the block is late lock and update the caches
+			if blockRoot == headRoot {
+				if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
+					return errors.Wrap(err, "could not update next slot state cache")
+				}
+				if slots.IsEpochEnd(slot) {
+					if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
+						return errors.Wrap(err, "could not handle epoch boundary")
+					}
+				}
+			}
+			_, tracked := s.trackedProposer(fcuArgs.headState, proposingSlot)
+			if tracked {
+				shouldOverrideFCU = s.shouldOverrideFCU(headRoot, proposingSlot)
+				fcuArgs.attributes = s.getPayloadAttribute(ctx, fcuArgs.headState, proposingSlot, headRoot[:])
+			}
+			if !shouldOverrideFCU {
+				if err := s.forkchoiceUpdateWithExecution(ctx, fcuArgs); err != nil {
+					return err
+				}
+			}
+		}
 	}

 	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
 	if err != nil {
 		log.WithError(err).Debug("Could not check if block is optimistic")
 		optimistic = true
 	}

 	// Send notification of the processed block to the state feed.
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.BlockProcessed,
@@ -114,32 +159,30 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
 			Optimistic: optimistic,
 		},
 	})

-	defer reportAttestationInclusion(b)
-	if headRoot == blockRoot {
-		// Updating next slot state cache can happen in the background
-		// except in the epoch boundary in which case we lock to handle
-		// the shuffling and proposer caches updates.
-		slot := postState.Slot()
-		if slots.IsEpochEnd(slot) {
-			if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
-				return errors.Wrap(err, "could not update next slot state cache")
-			}
-			if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
-				return errors.Wrap(err, "could not handle epoch boundary")
-			}
-		} else {
-			go func() {
-				slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
-				defer cancel()
-				if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
-					log.WithError(err).Error("could not update next slot state cache")
-				}
-			}()
-		}
-	}
+	// We handle these caches only on canonical
+	// blocks, otherwise this will be handled by lateBlockTasks
+	if blockRoot == headRoot && isEarly {
+		go func() {
+			slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
+			defer cancel()
+			if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
+				log.WithError(err).Error("could not update next slot state cache")
+			}
+			if slots.IsEpochEnd(slot) {
+				if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
+					log.WithError(err).Error("could not handle epoch boundary")
+				}
+			}
+			if _, tracked := s.trackedProposer(fcuArgs.headState, proposingSlot); !tracked {
+				return
+			}
+			fcuArgs.attributes = s.getPayloadAttribute(ctx, fcuArgs.headState, proposingSlot, headRoot[:])
+			s.cfg.ForkChoiceStore.RLock()
+			defer s.cfg.ForkChoiceStore.RUnlock()
+			if _, err := s.notifyForkchoiceUpdate(ctx, fcuArgs); err != nil {
+				log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
+			}
+		}()
+	}
+	defer reportAttestationInclusion(b)
 	onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
 	return nil
 }
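The two postBlockProcess hunks above split block handling on whether the block arrived early in the slot. A simplified, self-contained sketch of that decision flow — the booleans compress calls like isNewHead, trackedProposer, and shouldOverrideFCU, so treat the names as illustrative:

package main

import "fmt"

// handlePostBlock: early blocks trigger an immediate FCU and defer cache
// warm-up to a background goroutine; late blocks update caches first, may
// attach payload attributes if we propose next slot, and can skip FCU when a
// reorg of the late block is planned.
func handlePostBlock(isEarly, isNewHead, trackedProposer, overridePlanned bool) string {
	if !isNewHead {
		return "no FCU: head unchanged"
	}
	if isEarly {
		return "FCU now, warm caches in background"
	}
	if trackedProposer && overridePlanned {
		return "skip FCU: will attempt to reorg the late block"
	}
	if trackedProposer {
		return "FCU with payload attributes"
	}
	return "FCU without attributes"
}

func main() {
	fmt.Println(handlePostBlock(true, true, false, false))
	fmt.Println(handlePostBlock(false, true, true, true))
	fmt.Println(handlePostBlock(false, true, true, false))
}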
@@ -322,10 +365,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 			return errors.Wrap(err, "could not set optimistic block to valid")
 		}
 	}
-	arg := &notifyForkchoiceUpdateArg{
+	arg := &fcuConfig{
 		headState: preState,
 		headRoot:  lastBR,
-		headBlock: lastB.Block(),
+		headBlock: lastB,
 	}
 	if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
 		return err
@@ -381,10 +424,24 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 		if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
 			log.WithError(err).Warn("Could not update committee cache")
 		}
 		if err := helpers.UpdateProposerIndicesInCache(slotCtx, st, e+1); err != nil {
 			log.WithError(err).Warn("Failed to cache next epoch proposers")
 		}
 	}()
+	// The latest block header is from the previous epoch
+	r, err := st.LatestBlockHeader().HashTreeRoot()
+	if err != nil {
+		log.WithError(err).Error("could not update proposer index state-root map")
+		return nil
+	}
+	// The proposer indices cache takes the target root for the previous
+	// epoch as key
+	target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e-1)
+	if err != nil {
+		log.WithError(err).Error("could not update proposer index state-root map")
+		return nil
+	}
+	err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
+	if err != nil {
+		log.WithError(err).Error("could not update proposer index state-root map")
+	}
 	return nil
 }
@@ -651,16 +708,23 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
 		log.WithError(err).Debug("could not update next slot state cache")
 	}
+	// handleEpochBoundary requires a forkchoice lock to obtain the target root.
+	s.cfg.ForkChoiceStore.RLock()
 	if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
 		log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
 	}
-	// Head root should be empty when retrieving proposer index for the next slot.
-	_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
-	// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
-	if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
+	s.cfg.ForkChoiceStore.RUnlock()
+	_, tracked := s.trackedProposer(headState, s.CurrentSlot()+1)
+	// return early if we are not proposing next slot.
+	if !tracked {
 		return
 	}
+	// return early if we already started building a block for the current
+	// head root
+	_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
+	if has {
+		return
+	}

 	s.headLock.RLock()
 	headBlock, err := s.headBlock()
 	if err != nil {
@@ -670,11 +734,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	}
 	s.headLock.RUnlock()
 	s.cfg.ForkChoiceStore.RLock()
-	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
+	fcuArgs := &fcuConfig{
 		headState: headState,
 		headRoot:  headRoot,
-		headBlock: headBlock.Block(),
-	})
+		headBlock: headBlock,
+	}
+	fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
+	_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
 	s.cfg.ForkChoiceStore.RUnlock()
 	if err != nil {
 		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
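lateBlockTasks now bails out unless the node proposes next slot and has not already started building for the current head. A sketch of those two guards with hypothetical types (a plain map standing in for the payload ID cache):

package main

import "fmt"

type payloadIDs map[string][8]byte

// shouldStartBuilding mirrors the early returns above: only fire an
// attribute-bearing FCU if (1) we propose next slot and (2) no payload is
// already being built for this (slot, head root).
func shouldStartBuilding(tracked bool, cache payloadIDs, key string) bool {
	if !tracked {
		return false // not proposing next slot
	}
	if _, has := cache[key]; has {
		return false // already started building for this head root
	}
	return true
}

func main() {
	cache := payloadIDs{"slot2|roota": {1}}
	fmt.Println(shouldStartBuilding(true, cache, "slot2|roota"))  // false
	fmt.Println(shouldStartBuilding(true, cache, "slot2|rootb"))  // true
	fmt.Println(shouldStartBuilding(false, cache, "slot2|rootb")) // false
}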
@@ -46,7 +46,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
 	}

 	// Verify block slot time is not from the future.
-	if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
+	if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
 		return nil, err
 	}

@@ -895,7 +895,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
 	cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
 	params.OverrideBeaconConfig(cfg)

-	service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
 	ctx := tr.ctx

 	aHash := common.BytesToHash([]byte("a"))
@@ -2045,12 +2045,12 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
 // Helper function to simulate the block being on time or delayed for proposer
 // boost. It alters the genesisTime tracked by the store.
 func driftGenesisTime(s *Service, slot, delay int64) {
-	offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
+	offset := slot*int64(params.BeaconConfig().SecondsPerSlot) + delay
 	s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
 }

 func Test_commitmentsToCheck(t *testing.T) {
-	windowSlots, err := slots.EpochEnd(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
+	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
 	require.NoError(t, err)
 	commits := [][]byte{
 		bytesutil.PadTo([]byte("a"), 48),
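The sign flip in driftGenesisTime makes a positive delay mean "delay seconds into the slot": shifting genesis back by slot*secondsPerSlot+delay puts the clock exactly that far past the start of the slot. A tiny runnable demonstration under an assumed 12-second slot:

package main

import (
	"fmt"
	"time"
)

func main() {
	const secondsPerSlot = 12
	slot, delay := int64(3), int64(2)

	offset := slot*secondsPerSlot + delay
	genesis := time.Now().Add(-time.Duration(offset) * time.Second)

	sinceGenesis := time.Since(genesis).Seconds()
	intoSlot := sinceGenesis - float64(slot*secondsPerSlot)
	fmt.Printf("~%.0fs into slot %d\n", intoSlot, slot) // ~2s into slot 3
}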
@@ -9,7 +9,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
-	"github.com/prysmaticlabs/prysm/v4/config/features"
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -121,36 +120,45 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
 	// This function is only called at 10 seconds or 0 seconds into the slot
-	disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
-	if !features.Get().DisableReorgLateBlocks {
-		disparity += reorgLateBlockCountAttestations
-	}
+	disparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
+	disparity += reorgLateBlockCountAttestations

 	s.processAttestations(ctx, disparity)

 	processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

 	start = time.Now()
+	// return early if we haven't changed head
 	newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
 	if err != nil {
 		log.WithError(err).Error("Could not compute head from new attestations")
-		// Fallback to our current head root in the event of a failure.
-		s.headLock.RLock()
-		newHeadRoot = s.headRoot()
-		s.headLock.RUnlock()
+		return
 	}
+	if !s.isNewHead(newHeadRoot) {
+		return
+	}
+	log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
+	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
+	if err != nil {
+		log.WithError(err).Error("could not get head block")
+		return
+	}
 	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

-	changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
-	if err != nil {
-		log.WithError(err).Error("could not update forkchoice")
+	fcuArgs := &fcuConfig{
+		headState:     headState,
+		headRoot:      newHeadRoot,
+		headBlock:     headBlock,
+		proposingSlot: proposingSlot,
 	}
-	if changed {
-		s.headLock.RLock()
-		log.WithFields(logrus.Fields{
-			"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
-			"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
-		}).Debug("Head changed due to attestations")
-		s.headLock.RUnlock()
+	_, tracked := s.trackedProposer(headState, proposingSlot)
+	if tracked {
+		if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
+			return
+		}
+		fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
+	}
+	if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
+		log.WithError(err).Error("could not update forkchoice")
 	}
 }
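Restructured UpdateHead now short-circuits instead of falling back to the previous head on a head-computation failure, and instead of calling FCU when the head is unchanged. A sketch of that control flow with placeholder functions:

package main

import (
	"errors"
	"fmt"
)

// updateHead returns the new head and whether an FCU should follow: a failure
// or an unchanged head both short-circuit, mirroring the early returns above.
func updateHead(computeHead func() (string, error), currentHead string) (string, bool) {
	newHead, err := computeHead()
	if err != nil {
		return "", false // return early; no fallback FCU
	}
	if newHead == currentHead {
		return "", false // head unchanged, nothing to notify
	}
	return newHead, true
}

func main() {
	fail := func() (string, error) { return "", errors.New("no head") }
	same := func() (string, error) { return "a", nil }
	diff := func() (string, error) { return "b", nil }

	for _, f := range []func() (string, error){fail, same, diff} {
		h, ok := updateHead(f, "a")
		fmt.Printf("head=%q notify=%v\n", h, ok)
	}
}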
@@ -7,7 +7,7 @@ import (
 )

 // SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
-// for the blocroot `root` is ready in the database
+// for the block root `root` is ready in the database
 func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
 	s.blobNotifiers.notifyIndex(root, index)
 }
||||
@@ -106,9 +106,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
daStartTime := time.Now()
|
||||
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not validate blob data availability")
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
|
||||
// The rest of block processing takes a lock on forkchoice.
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
@@ -120,7 +124,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if coreTime.CurrentEpoch(postState) > currentEpoch {
|
||||
if coreTime.CurrentEpoch(postState) > currentEpoch && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
|
||||
headSt, err := s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
@@ -171,7 +175,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	// Log block sync status.
 	cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
 	justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
-	if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
+	if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
 		log.WithError(err).Error("Unable to log block sync status")
 	}
 	// Log payload data
@@ -183,7 +187,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		log.WithError(err).Error("Unable to log state transition data")
 	}

-	chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))
+	timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
+	chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))

 	return nil
 }
||||
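The timing change above separates the data-availability wait from the rest of block processing, so the chainServiceProcessingTime metric reflects only work the node itself performs. A minimal standalone sketch of the same pattern (the helper names here are hypothetical, not part of this diff):

```go
package main

import (
	"fmt"
	"time"
)

// processBlock mimics the ReceiveBlock flow above: measure the blocking
// data-availability wait on its own, then subtract it from the total so
// the processing metric excludes time spent purely waiting on the network.
func processBlock(waitForDA func(), process func()) (daWait, withoutDA time.Duration) {
	received := time.Now()

	daStart := time.Now()
	waitForDA() // e.g. waiting for blob sidecars to arrive
	daWait = time.Since(daStart)

	process() // state transition, forkchoice, logging, etc.

	withoutDA = time.Since(received) - daWait
	return daWait, withoutDA
}

func main() {
	da, rest := processBlock(
		func() { time.Sleep(50 * time.Millisecond) },
		func() { time.Sleep(10 * time.Millisecond) },
	)
	fmt.Printf("da=%v processing-without-da=%v\n", da, rest)
}
```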
@@ -7,6 +7,7 @@ import (
 	"time"

 	blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -130,7 +131,9 @@ func TestService_ReceiveBlock(t *testing.T) {
 	s, tr := minimalTestService(t,
 		WithFinalizedStateAtStartUp(genesis),
 		WithExitPool(voluntaryexits.NewPool()),
-		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
+		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
+		WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
+	)

 	beaconDB := tr.db
 	genesisBlockRoot := bytesutil.ToBytes32(nil)
@@ -73,7 +73,8 @@ type config struct {
 	ChainStartFetcher       execution.ChainStartFetcher
 	BeaconDB                db.HeadAccessDatabase
 	DepositCache            cache.DepositCache
-	ProposerSlotIndexCache  *cache.ProposerPayloadIDsCache
+	PayloadIDCache          *cache.PayloadIDCache
+	TrackedValidatorsCache  *cache.TrackedValidatorsCache
 	AttPool                 attestations.Pool
 	ExitPool                voluntaryexits.PoolManager
 	SlashingPool            slashings.PoolManager
@@ -167,7 +168,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 		checkpointStateCache: cache.NewCheckpointStateCache(),
 		initSyncBlocks:       make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
 		blobNotifiers:        bn,
-		cfg:                  &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
+		cfg:                  &config{},
 		blockBeingSynced:     &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
 	}
 	for _, opt := range opts {
@@ -100,7 +100,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 		WithForkChoiceStore(fc),
 		WithAttestationService(attService),
 		WithStateGen(stateGen),
-		WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
+		WithPayloadIDCache(cache.NewPayloadIDCache()),
 		WithClockSynchronizer(startup.NewClockSynchronizer()),
 	}
@@ -6,6 +6,7 @@ import (
 	"testing"

 	"github.com/prysmaticlabs/prysm/v4/async/event"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
 	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -114,6 +115,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
 		WithAttestationService(req.attSrv),
 		WithBLSToExecPool(req.blsPool),
 		WithDepositCache(dc),
+		WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
 	}
 	// append the variadic opts so they override the defaults by being processed afterwards
 	opts = append(defOpts, opts...)
@@ -73,6 +73,7 @@ type ChainService struct {
 	BlockSlot   primitives.Slot
 	SyncingRoot [32]byte
 	Blobs       []blocks.VerifiedROBlob
+	TargetRoot  [32]byte
 }

 func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -617,3 +618,8 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
 	c.Blobs = append(c.Blobs, b)
 	return nil
 }
+
+// TargetRootForEpoch mocks the same method in the chain service
+func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
+	return c.TargetRoot, nil
+}
beacon-chain/blockchain/tracked_proposer.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+package blockchain
+
+import (
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/config/features"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+)
+
+// trackedProposer returns whether the beacon node was informed, via the
+// validators/prepare_proposer endpoint, of the proposer at the given slot.
+// It only returns true if the tracked proposer is present and active.
+func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
+	if features.Get().PrepareAllPayloads {
+		return cache.TrackedValidator{Active: true}, true
+	}
+	id, err := helpers.BeaconProposerIndexAtSlot(s.ctx, st, slot)
+	if err != nil {
+		return cache.TrackedValidator{}, false
+	}
+	val, ok := s.cfg.TrackedValidatorsCache.Validator(id)
+	if !ok {
+		return cache.TrackedValidator{}, false
+	}
+	return val, val.Active
+}
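The new file gates payload preparation on two conditions: the slot's proposer must be registered in the TrackedValidatorsCache, and it must be active. A standalone sketch of that gate, using stub types in place of the real cache (the stubs are illustrative only, not the package's API):

```go
package main

import "fmt"

// TrackedValidator mirrors the shape used above: a validator registered via
// the validators/prepare_proposer endpoint, which may or may not be active.
type TrackedValidator struct{ Active bool }

// stubCache stands in for cache.TrackedValidatorsCache.
type stubCache map[uint64]TrackedValidator

func (c stubCache) Validator(id uint64) (TrackedValidator, bool) {
	v, ok := c[id]
	return v, ok
}

// isTracked returns true only when the proposer is both present and active,
// matching the "val, val.Active" return in trackedProposer.
func isTracked(c stubCache, proposerID uint64) bool {
	val, ok := c.Validator(proposerID)
	if !ok {
		return false // proposer was never registered
	}
	return val.Active // registered but inactive validators are ignored
}

func main() {
	c := stubCache{7: {Active: true}, 9: {Active: false}}
	fmt.Println(isTracked(c, 7), isTracked(c, 9), isTracked(c, 11)) // true false false
}
```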
beacon-chain/cache/BUILD.bazel (vendored, 4 lines changed)
@@ -25,6 +25,7 @@ go_library(
         "sync_committee_disabled.go",  # keep
         "sync_committee_head_state.go",
         "sync_subnet_ids.go",
+        "tracked_validators.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache",
     visibility = [
@@ -33,6 +34,7 @@ go_library(
         "//tools:__subpackages__",
     ],
     deps = [
+        "//beacon-chain/forkchoice/types:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//cache/lru:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -78,6 +80,7 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//beacon-chain/forkchoice/types:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -90,6 +93,7 @@ go_test(
         "//testing/util:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_google_gofuzz//:go_default_library",
+        "@com_github_stretchr_testify//require:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
beacon-chain/cache/attestation_data.go (vendored, 172 lines changed)
@@ -1,171 +1,41 @@
 package cache

 import (
-	"context"
 	"errors"
-	"fmt"
-	"math"
 	"sync"
-	"time"

-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
-	"k8s.io/client-go/tools/cache"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 )

-var (
-	// Delay parameters
-	minDelay    = float64(10)        // 10 nanoseconds
-	maxDelay    = float64(100000000) // 0.1 second
-	delayFactor = 1.1
-
-	// Metrics
-	attestationCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "attestation_cache_miss",
-		Help: "The number of attestation data requests that aren't present in the cache.",
-	})
-	attestationCacheHit = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "attestation_cache_hit",
-		Help: "The number of attestation data requests that are present in the cache.",
-	})
-	attestationCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "attestation_cache_size",
-		Help: "The number of attestation data in the attestations cache",
-	})
-)
-
-// ErrAlreadyInProgress appears when attempting to mark a cache as in progress while it is
-// already in progress. The client should handle this error and wait for the in progress
-// data to resolve via Get.
-var ErrAlreadyInProgress = errors.New("already in progress")
+type AttestationConsensusData struct {
+	Slot     primitives.Slot
+	HeadRoot []byte
+	Target   forkchoicetypes.Checkpoint
+	Source   forkchoicetypes.Checkpoint
+}

-// AttestationCache is used to store the cached results of an AttestationData request.
+// AttestationCache stores cached results of AttestationData requests.
 type AttestationCache struct {
-	cache      *cache.FIFO
-	lock       sync.RWMutex
-	inProgress map[string]bool
+	a *AttestationConsensusData
+	sync.RWMutex
 }

-// NewAttestationCache initializes the map and underlying cache.
+// NewAttestationCache creates a new instance of AttestationCache.
 func NewAttestationCache() *AttestationCache {
-	return &AttestationCache{
-		cache:      cache.NewFIFO(wrapperToKey),
-		inProgress: make(map[string]bool),
-	}
+	return &AttestationCache{}
 }

-// Get waits for any in progress calculation to complete before returning a
-// cached response, if any.
-func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
-	if req == nil {
-		return nil, errors.New("nil attestation data request")
-	}
-
-	s, e := reqToKey(req)
-	if e != nil {
-		return nil, e
-	}
-
-	delay := minDelay
-
-	// Another identical request may be in progress already. Let's wait until
-	// any in progress request resolves or our timeout is exceeded.
-	for {
-		if ctx.Err() != nil {
-			return nil, ctx.Err()
-		}
-
-		c.lock.RLock()
-		if !c.inProgress[s] {
-			c.lock.RUnlock()
-			break
-		}
-		c.lock.RUnlock()
-
-		// This increasing backoff is to decrease the CPU cycles while waiting
-		// for the in progress boolean to flip to false.
-		time.Sleep(time.Duration(delay) * time.Nanosecond)
-		delay *= delayFactor
-		delay = math.Min(delay, maxDelay)
-	}
-
-	item, exists, err := c.cache.GetByKey(s)
-	if err != nil {
-		return nil, err
-	}
-
-	if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
-		attestationCacheHit.Inc()
-		return ethpb.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
-	}
-	attestationCacheMiss.Inc()
-	return nil, nil
+// Get retrieves cached attestation data. This method is lock free.
+func (c *AttestationCache) Get() *AttestationConsensusData {
+	return c.a
 }

-// MarkInProgress a request so that any other similar requests will block on
-// Get until MarkNotInProgress is called.
-func (c *AttestationCache) MarkInProgress(req *ethpb.AttestationDataRequest) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	s, e := reqToKey(req)
-	if e != nil {
-		return e
-	}
-	if c.inProgress[s] {
-		return ErrAlreadyInProgress
-	}
-	c.inProgress[s] = true
-	return nil
-}
-
-// MarkNotInProgress will release the lock on a given request. This should be
-// called after put.
-func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	s, e := reqToKey(req)
-	if e != nil {
-		return e
-	}
-	delete(c.inProgress, s)
-	return nil
-}
-
-// Put the response in the cache.
-func (c *AttestationCache) Put(_ context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
-	data := &attestationReqResWrapper{
-		req,
-		res,
-	}
-	if err := c.cache.AddIfNotPresent(data); err != nil {
-		return err
-	}
-	trim(c.cache, maxCacheSize)
-
-	attestationCacheSize.Set(float64(len(c.cache.List())))
+// Put adds a response to the cache. This method is lock free.
+func (c *AttestationCache) Put(a *AttestationConsensusData) error {
+	if a == nil {
+		return errors.New("attestation cannot be nil")
+	}
+	c.a = a
 	return nil
 }
-
-func wrapperToKey(i interface{}) (string, error) {
-	w, ok := i.(*attestationReqResWrapper)
-	if !ok {
-		return "", errors.New("key is not of type *attestationReqResWrapper")
-	}
-	if w == nil {
-		return "", errors.New("nil wrapper")
-	}
-	if w.req == nil {
-		return "", errors.New("nil wrapper.request")
-	}
-	return reqToKey(w.req)
-}
-
-func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
-	return fmt.Sprintf("%d", req.Slot), nil
-}
-
-type attestationReqResWrapper struct {
-	req *ethpb.AttestationDataRequest
-	res *ethpb.AttestationData
-}
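The rewrite collapses a keyed FIFO with in-progress bookkeeping into a single-entry cache: the consensus fields of attestation data (slot, head root, target, source) do not depend on the committee, so one value per node suffices and callers simply overwrite it each slot. A reduced standalone sketch of the new shape (stand-in types, not the real package):

```go
package main

import (
	"errors"
	"fmt"
)

// consensusData is a stand-in for cache.AttestationConsensusData.
type consensusData struct {
	Slot     uint64
	HeadRoot []byte
}

// singleEntryCache holds at most one value, like the new AttestationCache.
type singleEntryCache struct {
	a *consensusData
}

func (c *singleEntryCache) Get() *consensusData { return c.a }

func (c *singleEntryCache) Put(a *consensusData) error {
	if a == nil {
		return errors.New("attestation cannot be nil")
	}
	c.a = a // later slots simply overwrite earlier ones
	return nil
}

func main() {
	c := &singleEntryCache{}
	_ = c.Put(&consensusData{Slot: 1, HeadRoot: []byte{1}})
	fmt.Println(c.Get().Slot) // 1
}
```

Note that the real struct still embeds sync.RWMutex even though Get and Put are documented as lock free, presumably so callers that need atomicity across both operations can take the embedded lock themselves.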
beacon-chain/cache/attestation_data_test.go (vendored, 64 lines changed)
@@ -1,41 +1,53 @@
 package cache_test

 import (
-	"context"
 	"testing"

 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
-	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/v4/testing/assert"
-	"google.golang.org/protobuf/proto"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	"github.com/stretchr/testify/require"
 )

 func TestAttestationCache_RoundTrip(t *testing.T) {
-	ctx := context.Background()
 	c := cache.NewAttestationCache()

-	req := &ethpb.AttestationDataRequest{
-		CommitteeIndex: 0,
-		Slot:           1,
-	}
-
-	response, err := c.Get(ctx, req)
-	assert.NoError(t, err)
-	assert.Equal(t, (*ethpb.AttestationData)(nil), response)
-
-	assert.NoError(t, c.MarkInProgress(req))
-
-	res := &ethpb.AttestationData{
-		Target: &ethpb.Checkpoint{Epoch: 5, Root: make([]byte, 32)},
-	}
-
-	assert.NoError(t, c.Put(ctx, req, res))
-	assert.NoError(t, c.MarkNotInProgress(req))
-
-	response, err = c.Get(ctx, req)
-	assert.NoError(t, err)
-
-	if !proto.Equal(response, res) {
-		t.Error("Expected equal protos to return from cache")
-	}
+	a := c.Get()
+	require.Nil(t, a)
+
+	insert := &cache.AttestationConsensusData{
+		Slot:     1,
+		HeadRoot: []byte{1},
+		Target: forkchoicetypes.Checkpoint{
+			Epoch: 2,
+			Root:  [32]byte{3},
+		},
+		Source: forkchoicetypes.Checkpoint{
+			Epoch: 4,
+			Root:  [32]byte{5},
+		},
+	}
+	err := c.Put(insert)
+	require.NoError(t, err)
+
+	a = c.Get()
+	require.Equal(t, insert, a)
+
+	insert = &cache.AttestationConsensusData{
+		Slot:     6,
+		HeadRoot: []byte{7},
+		Target: forkchoicetypes.Checkpoint{
+			Epoch: 8,
+			Root:  [32]byte{9},
+		},
+		Source: forkchoicetypes.Checkpoint{
+			Epoch: 10,
+			Root:  [32]byte{11},
+		},
+	}
+
+	err = c.Put(insert)
+	require.NoError(t, err)
+
+	a = c.Get()
+	require.Equal(t, insert, a)
 }
beacon-chain/cache/common.go (vendored, 7 lines changed)
@@ -1,16 +1,9 @@
 package cache

 import (
-	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"k8s.io/client-go/tools/cache"
 )

-var (
-	// maxCacheSize is 4x of the epoch length for additional cache padding.
-	// Requests should be only accessing committees within defined epoch length.
-	maxCacheSize = uint64(4 * params.BeaconConfig().SlotsPerEpoch)
-)
-
 // trim the FIFO queue to the maxSize.
 func trim(queue *cache.FIFO, maxSize uint64) {
 	for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {
@@ -70,7 +70,7 @@ func New() (*DepositCache, error) {
 // InsertDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
 func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
+	_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
 	defer span.End()
 	if d == nil {
 		log.WithFields(logrus.Fields{
@@ -104,7 +104,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo

 // InsertDepositContainers inserts a set of deposit containers into our deposit cache.
 func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
+	_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
 	defer span.End()
 	dc.depositsLock.Lock()
 	defer dc.depositsLock.Unlock()
@@ -124,7 +124,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
 // InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
 func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
 	eth1DepositIndex int64, _ common.Hash, _ uint64) error {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
+	_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
 	defer span.End()
 	dc.depositsLock.Lock()
 	defer dc.depositsLock.Unlock()
@@ -176,7 +176,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,

 // AllDepositContainers returns all historical deposit containers.
 func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
+	_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -220,7 +220,7 @@ func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
 // DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
 // root that corresponds to the latest deposit at that blockheight.
 func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
+	_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -236,7 +236,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
 // DepositByPubkey looks through historical deposits and finds one which contains
 // a certain public key within its deposit data.
 func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
+	_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -257,7 +257,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

 // FinalizedDeposits returns the finalized deposits trie.
 func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
+	_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -271,7 +271,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedD
 // NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
 // If no block is specified then this method returns all non-finalized deposits.
 func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
+	_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -292,7 +292,7 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedI

 // PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
 func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
+	_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
 	defer span.End()
 	dc.depositsLock.Lock()
 	defer dc.depositsLock.Unlock()
@@ -314,7 +314,10 @@ func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64

 // Deposits returns the cached internal deposit tree.
 func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
-	return fd.deposits
+	if fd.deposits != nil {
+		return fd.deposits
+	}
+	return nil
 }

 // MerkleTrieIndex represents the last finalized index in

@@ -29,7 +29,7 @@ type PendingDepositsFetcher interface {
 // InsertPendingDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
 func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
+	_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
 	defer span.End()
 	if d == nil {
 		log.WithFields(logrus.Fields{
@@ -66,7 +66,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
 // PendingContainers returns a list of deposit containers until the given block number
 // (inclusive).
 func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
+	_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
 	defer span.End()
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
@@ -90,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
 // RemovePendingDeposit from the database. The deposit is indexed by the
 // Index. This method does nothing if deposit ptr is nil.
 func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
+	_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
 	defer span.End()

 	if d == nil {
@@ -128,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

 // PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
 func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
-	ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
+	_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
 	defer span.End()

 	if merkleTreeIndex == 0 {

@@ -76,7 +76,7 @@ func (c *Cache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {

 // AllDepositContainers returns all historical deposit containers.
 func (c *Cache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
-	ctx, span := trace.StartSpan(ctx, "Cache.AllDepositContainers")
+	_, span := trace.StartSpan(ctx, "Cache.AllDepositContainers")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()
@@ -101,7 +101,7 @@ func (c *Cache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContai
 // DepositByPubkey looks through historical deposits and finds one which contains
 // a certain public key within its deposit data.
 func (c *Cache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
-	ctx, span := trace.StartSpan(ctx, "Cache.DepositByPubkey")
+	_, span := trace.StartSpan(ctx, "Cache.DepositByPubkey")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()
@@ -123,7 +123,7 @@ func (c *Cache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Depo
 // DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
 // root that corresponds to the latest deposit at that blockheight.
 func (c *Cache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
-	ctx, span := trace.StartSpan(ctx, "Cache.DepositsNumberAndRootAtHeight")
+	_, span := trace.StartSpan(ctx, "Cache.DepositsNumberAndRootAtHeight")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()
@@ -141,7 +141,7 @@ func (c *Cache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *

 // FinalizedDeposits returns the finalized deposits trie.
 func (c *Cache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
-	ctx, span := trace.StartSpan(ctx, "Cache.FinalizedDeposits")
+	_, span := trace.StartSpan(ctx, "Cache.FinalizedDeposits")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()
@@ -159,7 +159,7 @@ func (c *Cache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits,
 // NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
 // If no block is specified then this method returns all non-finalized deposits.
 func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
-	ctx, span := trace.StartSpan(ctx, "Cache.NonFinalizedDeposits")
+	_, span := trace.StartSpan(ctx, "Cache.NonFinalizedDeposits")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()
@@ -180,7 +180,7 @@ func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int

 // PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
 func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
-	ctx, span := trace.StartSpan(ctx, "Cache.PruneProofs")
+	_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
 	defer span.End()
 	c.depositsLock.Lock()
 	defer c.depositsLock.Unlock()
@@ -202,7 +202,7 @@ func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error

 // PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
 func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
-	ctx, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
+	_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
 	defer span.End()

 	if merkleTreeIndex == 0 {
@@ -227,7 +227,7 @@ func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
 // InsertPendingDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
 func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
-	ctx, span := trace.StartSpan(ctx, "Cache.InsertPendingDeposit")
+	_, span := trace.StartSpan(ctx, "Cache.InsertPendingDeposit")
 	defer span.End()
 	if d == nil {
 		log.WithFields(logrus.Fields{
@@ -282,7 +282,7 @@ func (c *Cache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb
 // PendingContainers returns a list of deposit containers until the given block number
 // (inclusive).
 func (c *Cache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
-	ctx, span := trace.StartSpan(ctx, "Cache.PendingContainers")
+	_, span := trace.StartSpan(ctx, "Cache.PendingContainers")
 	defer span.End()
 	c.depositsLock.RLock()
 	defer c.depositsLock.RUnlock()

@@ -28,6 +28,9 @@ var (
 func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "Cache.InsertDeposit")
 	defer span.End()
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
 	if d == nil {
 		log.WithFields(logrus.Fields{
 			"block": blockNum,
@@ -60,7 +63,7 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui

 // InsertDepositContainers inserts a set of deposit containers into our deposit cache.
 func (c *Cache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
-	ctx, span := trace.StartSpan(ctx, "Cache.InsertDepositContainers")
+	_, span := trace.StartSpan(ctx, "Cache.InsertDepositContainers")
 	defer span.End()
 	c.depositsLock.Lock()
 	defer c.depositsLock.Unlock()
@@ -89,6 +92,10 @@ func (c *Cache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex in
 	c.depositsLock.Lock()
 	defer c.depositsLock.Unlock()

+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
+
 	depositTrie := c.finalizedDeposits.depositTree
 	insertIndex := int(c.finalizedDeposits.MerkleTrieIndex() + 1)
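The long run of deposit-cache hunks above makes one mechanical change: when the context returned by trace.StartSpan is never read afterwards, it is discarded with the blank identifier rather than shadowing ctx, which static analysis would otherwise flag as a value that is never used. A sketch of the two forms with a stand-in tracer (the span type here is hypothetical, not the real tracing package):

```go
package main

import (
	"context"
	"fmt"
)

// span is a stand-in for the span type returned by trace.StartSpan.
type span struct{ name string }

func (s *span) End() { fmt.Println("ended", s.name) }

// startSpan mimics the (ctx, span) return shape used in the hunks above.
func startSpan(ctx context.Context, name string) (context.Context, *span) {
	return ctx, &span{name: name}
}

func allDeposits(ctx context.Context) {
	// Before: ctx, span := startSpan(ctx, ...) shadowed ctx with a value the
	// body never read. After: the derived context is discarded explicitly.
	_, span := startSpan(ctx, "DepositsCache.AllDepositContainers")
	defer span.End()
	// ... read-locked access to the cached deposits ...
}

func main() { allDeposits(context.Background()) }
```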
beacon-chain/cache/error.go (vendored, 5 lines changed)
@@ -14,4 +14,9 @@ var (
 	errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
 	// ErrNotFoundRegistration when validator registration does not exist in cache.
 	ErrNotFoundRegistration = errors.Wrap(ErrNotFound, "no validator registered")
+
+	// ErrAlreadyInProgress appears when attempting to mark a cache as in progress while it is
+	// already in progress. The client should handle this error and wait for the in progress
+	// data to resolve via Get.
+	ErrAlreadyInProgress = errors.New("already in progress")
 )
beacon-chain/cache/payload_id.go (vendored, 111 lines changed)
@@ -1,94 +1,63 @@
 package cache

 import (
-	"bytes"
 	"sync"

-	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 )

-const keyLength = 40
-const vIdLength = 8
-const pIdLength = 8
-const vpIdsLength = vIdLength + pIdLength
+// RootToPayloadIDMap is a map with keys the head root and values the
+// corresponding PayloadID
+type RootToPayloadIDMap map[[32]byte]primitives.PayloadID

-// ProposerPayloadIDsCache is a cache of proposer payload IDs.
-// The key is the concatenation of the slot and the block root.
-// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
-type ProposerPayloadIDsCache struct {
-	slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
-	sync.RWMutex
+// PayloadIDCache is a cache that keeps track of the prepared payload ID for the
+// given slot and with the given head root.
+type PayloadIDCache struct {
+	slotToPayloadID map[primitives.Slot]RootToPayloadIDMap
+	sync.Mutex
 }

-// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
-func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
-	return &ProposerPayloadIDsCache{
-		slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
-	}
+// NewPayloadIDCache returns a new payload ID cache
+func NewPayloadIDCache() *PayloadIDCache {
+	return &PayloadIDCache{slotToPayloadID: make(map[primitives.Slot]RootToPayloadIDMap)}
 }

-// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
-func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
-	slot primitives.Slot,
-	r [fieldparams.RootLength]byte,
-) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
-	f.RLock()
-	defer f.RUnlock()
-	ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
+// PayloadID returns the payload ID for the given slot and parent block root
+func (p *PayloadIDCache) PayloadID(slot primitives.Slot, root [32]byte) (primitives.PayloadID, bool) {
+	p.Lock()
+	defer p.Unlock()
+	inner, ok := p.slotToPayloadID[slot]
 	if !ok {
-		return 0, [pIdLength]byte{}, false
+		return primitives.PayloadID{}, false
 	}
-	vId := ids[:vIdLength]
-
-	b := ids[vIdLength:]
-	var pId [pIdLength]byte
-	copy(pId[:], b)
-
-	return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
+	pid, ok := inner[root]
+	if !ok {
+		return primitives.PayloadID{}, false
+	}
+	return pid, true
 }

-// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
-func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
-	slot primitives.Slot,
-	vId primitives.ValidatorIndex,
-	pId [pIdLength]byte,
-	r [fieldparams.RootLength]byte,
-) {
-	f.Lock()
-	defer f.Unlock()
-	var vIdBytes [vIdLength]byte
-	copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
-
-	var bs [vpIdsLength]byte
-	copy(bs[:], append(vIdBytes[:], pId[:]...))
-
-	k := idKey(slot, r)
-	ids, ok := f.slotToProposerAndPayloadIDs[k]
-	// Ok to overwrite if the slot is already set but the cached payload ID is not set.
-	// This combats the re-org case where payload assignment could change at the start of the epoch.
-	var byte8 [vIdLength]byte
-	if !ok || (ok && bytes.Equal(ids[vIdLength:], byte8[:])) {
-		f.slotToProposerAndPayloadIDs[k] = bs
+// Set updates the payload ID for the given slot and head root.
+// It also prunes older entries in the cache.
+func (p *PayloadIDCache) Set(slot primitives.Slot, root [32]byte, pid primitives.PayloadID) {
+	p.Lock()
+	defer p.Unlock()
+	if slot > 1 {
+		p.prune(slot - 2)
+	}
+	inner, ok := p.slotToPayloadID[slot]
+	if !ok {
+		inner = make(RootToPayloadIDMap)
+		p.slotToPayloadID[slot] = inner
 	}
+	inner[root] = pid
 }

-// PrunePayloadIDs removes the payload ID entries older than input slot.
-func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
-	f.Lock()
-	defer f.Unlock()
-
-	for k := range f.slotToProposerAndPayloadIDs {
-		s := primitives.Slot(bytesutil.BytesToUint64BigEndian(k[:8]))
-		if slot > s {
-			delete(f.slotToProposerAndPayloadIDs, k)
+// prune removes payload IDs older than the given slot. It requires the
+// cache's Lock to be held.
+func (p *PayloadIDCache) prune(slot primitives.Slot) {
+	for key := range p.slotToPayloadID {
+		if key < slot {
+			delete(p.slotToPayloadID, key)
 		}
 	}
 }
-
-func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
-	var k [keyLength]byte
-	copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
-	return k
-}
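Set prunes before it inserts: a write at slot n first drops every entry with key strictly below n-2, so the cache retains payload IDs for roughly the current and previous slot only. A quick standalone check of that window (plain maps instead of the real cache):

```go
package main

import "fmt"

type payloadID [8]byte

func main() {
	slotToPayloadID := map[uint64]map[[32]byte]payloadID{
		5: {}, 6: {}, 7: {},
	}
	set := func(slot uint64) {
		if slot > 1 {
			// mirror of prune(slot - 2): delete keys strictly below slot-2
			for key := range slotToPayloadID {
				if key < slot-2 {
					delete(slotToPayloadID, key)
				}
			}
		}
		if _, ok := slotToPayloadID[slot]; !ok {
			slotToPayloadID[slot] = map[[32]byte]payloadID{}
		}
	}
	set(8)
	for k := range slotToPayloadID {
		fmt.Println(k) // 6, 7 and 8 survive; 5 is pruned
	}
}
```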
beacon-chain/cache/payload_id_test.go (vendored, 53 lines changed)
@@ -8,65 +8,54 @@ import (
 )

 func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
-	cache := NewProposerPayloadIDsCache()
+	cache := NewPayloadIDCache()
 	var r [32]byte
-	i, p, ok := cache.GetProposerPayloadIDs(0, r)
+	p, ok := cache.PayloadID(0, r)
 	require.Equal(t, false, ok)
-	require.Equal(t, primitives.ValidatorIndex(0), i)
-	require.Equal(t, [pIdLength]byte{}, p)
+	require.Equal(t, primitives.PayloadID{}, p)

 	slot := primitives.Slot(1234)
-	vid := primitives.ValidatorIndex(34234324)
-	pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
+	pid := primitives.PayloadID{1, 2, 3, 3, 7, 8, 7, 8}
 	r = [32]byte{1, 2, 3}
-	cache.SetProposerAndPayloadIDs(slot, vid, pid, r)
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	cache.Set(slot, r, pid)
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, true, ok)
-	require.Equal(t, vid, i)
 	require.Equal(t, pid, p)

 	slot = primitives.Slot(9456456)
-	vid = primitives.ValidatorIndex(6786745)
 	r = [32]byte{4, 5, 6}
-	cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{}, r)
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	cache.Set(slot, r, primitives.PayloadID{})
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, true, ok)
-	require.Equal(t, vid, i)
-	require.Equal(t, [pIdLength]byte{}, p)
+	require.Equal(t, primitives.PayloadID{}, p)

 	// reset cache without pid
 	slot = primitives.Slot(9456456)
-	vid = primitives.ValidatorIndex(11111)
 	r = [32]byte{7, 8, 9}
-	pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
-	cache.SetProposerAndPayloadIDs(slot, vid, pid, r)
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	pid = primitives.PayloadID{3, 2, 3, 33, 72, 8, 7, 8}
+	cache.Set(slot, r, pid)
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, true, ok)
-	require.Equal(t, vid, i)
 	require.Equal(t, pid, p)

 	// Forked chain
 	r = [32]byte{1, 2, 3}
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, false, ok)
-	require.Equal(t, primitives.ValidatorIndex(0), i)
-	require.Equal(t, [pIdLength]byte{}, p)
+	require.Equal(t, primitives.PayloadID{}, p)

-	// existing pid - no change in cache
+	// existing pid - change the cache
 	slot = primitives.Slot(9456456)
-	vid = primitives.ValidatorIndex(11111)
 	r = [32]byte{7, 8, 9}
-	newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
-	cache.SetProposerAndPayloadIDs(slot, vid, newPid, r)
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	newPid := primitives.PayloadID{1, 2, 3, 33, 72, 8, 7, 1}
+	cache.Set(slot, r, newPid)
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, true, ok)
-	require.Equal(t, vid, i)
-	require.Equal(t, pid, p)
+	require.Equal(t, newPid, p)

 	// remove cache entry
-	cache.PrunePayloadIDs(slot + 1)
-	i, p, ok = cache.GetProposerPayloadIDs(slot, r)
+	cache.prune(slot + 1)
+	p, ok = cache.PayloadID(slot, r)
 	require.Equal(t, false, ok)
-	require.Equal(t, primitives.ValidatorIndex(0), i)
-	require.Equal(t, [pIdLength]byte{}, p)
+	require.Equal(t, primitives.PayloadID{}, p)
 }
beacon-chain/cache/proposer_indices.go (vendored, 145 lines changed)
@@ -7,15 +7,12 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-	"k8s.io/client-go/tools/cache"
 )

 var (
-	// maxProposerIndicesCacheSize defines the max number of proposer indices on per block root basis can cache.
-	// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
-	maxProposerIndicesCacheSize = uint64(8)
-
 	// ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
 	ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "proposer_indices_cache_miss",
@@ -28,85 +25,97 @@ var (
 	})
 )

-// ProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
+// ProposerIndicesCache keeps track of the proposer indices in the next two
+// epochs. It is keyed by the state root of the last epoch before. That is, for
+// blocks during epoch 2, for example slot 65, it will be keyed by the state
+// root of slot 63 (last slot in epoch 1).
+// The cache keeps two sets of indices computed, the "safe" set is computed
+// right before the epoch transition into the current epoch. For example for
+// epoch 2 we will compute this list after importing block 63. The "unsafe"
+// version is computed an epoch in advance, for example for epoch 3, it will be
+// computed after importing block 63.
+//
+// The cache also keeps a map from checkpoints to state roots so that one is
+// able to access the proposer indices list from a checkpoint instead. The
+// checkpoint is the checkpoint for the epoch previous to the requested
+// proposer indices. That is, for a slot in epoch 2 (eg. 65), the checkpoint
+// root would be for slot 32 if present.
 type ProposerIndicesCache struct {
-	proposerIndicesCache *cache.FIFO
-	lock                 sync.RWMutex
+	sync.Mutex
+	indices map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
+	rootMap map[forkchoicetypes.Checkpoint][32]byte // A map from checkpoint root to state root
 }

-// proposerIndicesKeyFn takes the block root as the key to retrieve proposer indices in a given epoch.
-func proposerIndicesKeyFn(obj interface{}) (string, error) {
-	info, ok := obj.(*ProposerIndices)
-	if !ok {
-		return "", ErrNotProposerIndices
-	}
-
-	return key(info.BlockRoot), nil
-}
-
-// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
+// NewProposerIndicesCache returns a newly created cache
 func NewProposerIndicesCache() *ProposerIndicesCache {
-	c := &ProposerIndicesCache{}
-	c.Clear()
-	return c
-}
-
-// Clear resets the ProposerIndicesCache to its initial state
-func (c *ProposerIndicesCache) Clear() {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.proposerIndicesCache = cache.NewFIFO(proposerIndicesKeyFn)
-}
-
-// AddProposerIndices adds ProposerIndices object to the cache.
-// This method also trims the least recently list if the cache size has ready the max cache size limit.
-func (c *ProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	if err := c.proposerIndicesCache.AddIfNotPresent(p); err != nil {
-		return err
-	}
-	trim(c.proposerIndicesCache, maxProposerIndicesCacheSize)
-	return nil
-}
-
-// HasProposerIndices returns the proposer indices of a block root seed.
-func (c *ProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
-	c.lock.RLock()
-	defer c.lock.RUnlock()
-	_, exists, err := c.proposerIndicesCache.GetByKey(key(r))
-	if err != nil {
-		return false, err
-	}
-	return exists, nil
-}
-
-// ProposerIndices returns the proposer indices of a block root seed.
-func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]primitives.ValidatorIndex, error) {
-	c.lock.RLock()
-	defer c.lock.RUnlock()
-	obj, exists, err := c.proposerIndicesCache.GetByKey(key(r))
-	if err != nil {
-		return nil, err
-	}
-
-	if !exists {
-		ProposerIndicesCacheMiss.Inc()
-		return nil, nil
-	}
-
-	item, ok := obj.(*ProposerIndices)
-	if !ok {
-		return nil, ErrNotProposerIndices
-	}
-
-	return item.ProposerIndices, nil
-}
-
-// Len returns the number of keys in the underlying cache.
-func (c *ProposerIndicesCache) Len() int {
-	return len(c.proposerIndicesCache.ListKeys())
+	return &ProposerIndicesCache{
+		indices: make(map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex),
+		rootMap: make(map[forkchoicetypes.Checkpoint][32]byte),
+	}
+}
+
+// ProposerIndices returns the proposer indices (safe) for the given root
+func (p *ProposerIndicesCache) ProposerIndices(epoch primitives.Epoch, root [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
+	p.Lock()
+	defer p.Unlock()
+	inner, ok := p.indices[epoch]
+	if !ok {
+		ProposerIndicesCacheMiss.Inc()
+		return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
+	}
+	indices, exists := inner[root]
+	if exists {
+		ProposerIndicesCacheHit.Inc()
+	} else {
+		ProposerIndicesCacheMiss.Inc()
+	}
+	return indices, exists
+}
+
+// Prune removes entries for epochs older than the given epoch
+func (p *ProposerIndicesCache) Prune(epoch primitives.Epoch) {
+	p.Lock()
+	defer p.Unlock()
+	for key := range p.indices {
+		if key < epoch {
+			delete(p.indices, key)
+		}
+	}
+	for key := range p.rootMap {
+		if key.Epoch+1 < epoch {
+			delete(p.rootMap, key)
+		}
+	}
+}
+
+// Set sets the proposer indices for the given root as key
+func (p *ProposerIndicesCache) Set(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
+	p.Lock()
+	defer p.Unlock()
+	inner, ok := p.indices[epoch]
+	if !ok {
+		inner = make(map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex)
+		p.indices[epoch] = inner
+	}
+	inner[root] = indices
+}
+
+// SetCheckpoint updates the map from checkpoints to state roots
+func (p *ProposerIndicesCache) SetCheckpoint(c forkchoicetypes.Checkpoint, root [32]byte) {
+	p.Lock()
+	defer p.Unlock()
+	p.rootMap[c] = root
+}
+
+// IndicesFromCheckpoint returns the proposer indices from a checkpoint rather than the state root
+func (p *ProposerIndicesCache) IndicesFromCheckpoint(c forkchoicetypes.Checkpoint) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
+	p.Lock()
+	emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
+	root, ok := p.rootMap[c]
+	p.Unlock()
+	if !ok {
+		return emptyIndices, ok
+	}
+	return p.ProposerIndices(c.Epoch+1, root)
 }
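IndicesFromCheckpoint resolves in two steps: the checkpoint is mapped to the state root recorded by SetCheckpoint, and the indices are then looked up under that root at epoch c.Epoch+1, matching the keying convention in the doc comment above. A compact sketch of the indirection with plain maps (illustrative only):

```go
package main

import "fmt"

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

func main() {
	rootMap := map[checkpoint][32]byte{}          // checkpoint -> state root
	indices := map[uint64]map[[32]byte][]uint64{} // epoch -> state root -> proposers

	stateRoot := [32]byte{'a'}
	cp := checkpoint{Epoch: 3, Root: [32]byte{'b'}}

	// SetCheckpoint, plus Set for the epoch after the checkpoint's epoch.
	rootMap[cp] = stateRoot
	indices[cp.Epoch+1] = map[[32]byte][]uint64{stateRoot: {1, 2, 3}}

	// IndicesFromCheckpoint: two lookups chained together.
	root, ok := rootMap[cp]
	if !ok {
		return
	}
	fmt.Println(indices[cp.Epoch+1][root]) // [1 2 3]
}
```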
beacon-chain/cache/proposer_indices_disabled.go (vendored, 54 lines changed)
@@ -3,7 +3,26 @@
 // This file is used in fuzzer builds to bypass proposer indices caches.
 package cache

-import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+)
+
+var (
+	// ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
+	ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "proposer_indices_cache_miss",
+		Help: "The number of proposer indices requests that aren't present in the cache.",
+	})
+	// ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
+	ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "proposer_indices_cache_hit",
+		Help: "The number of proposer indices requests that are present in the cache.",
+	})
+)

 // FakeProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
 type FakeProposerIndicesCache struct {
@@ -14,26 +33,31 @@ func NewProposerIndicesCache() *FakeProposerIndicesCache {
 	return &FakeProposerIndicesCache{}
 }

-// AddProposerIndices adds ProposerIndices object to the cache.
-// This method also trims the least recently list if the cache size has ready the max cache size limit.
-func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
-	return nil
+// ProposerIndices is a stub.
+func (c *FakeProposerIndicesCache) ProposerIndices(_ primitives.Epoch, _ [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
+	return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
 }

-// ProposerIndices returns the proposer indices of a block root seed.
-func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]primitives.ValidatorIndex, error) {
-	return nil, nil
+// UnsafeProposerIndices is a stub.
+func (c *FakeProposerIndicesCache) UnsafeProposerIndices(_ primitives.Epoch, _ [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
+	return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
 }

-// HasProposerIndices returns the proposer indices of a block root seed.
-func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
-	return false, nil
+// Prune is a stub.
+func (p *FakeProposerIndicesCache) Prune(epoch primitives.Epoch) {}
+
+// Set is a stub.
+func (p *FakeProposerIndicesCache) Set(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
 }

-func (c *FakeProposerIndicesCache) Len() int {
-	return 0
+// SetUnsafe is a stub.
+func (p *FakeProposerIndicesCache) SetUnsafe(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
 }

-// Clear is a stub.
-func (c *FakeProposerIndicesCache) Clear() {
-}
+// SetCheckpoint is a stub.
+func (p *FakeProposerIndicesCache) SetCheckpoint(c forkchoicetypes.Checkpoint, root [32]byte) {}
+
+// IndicesFromCheckpoint is a stub.
+func (p *FakeProposerIndicesCache) IndicesFromCheckpoint(_ forkchoicetypes.Checkpoint) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
+	return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
+}
beacon-chain/cache/proposer_indices_test.go (vendored, 138 lines changed)
@@ -3,72 +3,104 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestProposerKeyFn_OK(t *testing.T) {
|
||||
item := &ProposerIndices{
|
||||
BlockRoot: [32]byte{'A'},
|
||||
ProposerIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
k, err := proposerIndicesKeyFn(item)
|
||||
require.NoError(t, err)
|
||||
	assert.Equal(t, key(item.BlockRoot), k)
}

func TestProposerKeyFn_InvalidObj(t *testing.T) {
	_, err := proposerIndicesKeyFn("bad")
	assert.Equal(t, ErrNotProposerIndices, err)
}

func TestProposerCache_AddProposerIndicesList(t *testing.T) {
func TestProposerCache_Set(t *testing.T) {
	cache := NewProposerIndicesCache()
	bRoot := [32]byte{'A'}
	indices, err := cache.ProposerIndices(bRoot)
	require.NoError(t, err)
	if indices != nil {
		t.Error("Expected committee count not to exist in empty cache")
	}
	has, err := cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, false, has)
	require.NoError(t, cache.AddProposerIndices(&ProposerIndices{
		ProposerIndices: indices,
		BlockRoot:       bRoot,
	}))
	indices, ok := cache.ProposerIndices(0, bRoot)
	require.Equal(t, false, ok)
	emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
	require.Equal(t, indices, emptyIndices, "Expected committee count not to exist in empty cache")
	emptyIndices[0] = 1
	cache.Set(0, bRoot, emptyIndices)

	received, err := cache.ProposerIndices(bRoot)
	require.NoError(t, err)
	assert.DeepEqual(t, received, indices)
	has, err = cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, true, has)
	received, ok := cache.ProposerIndices(0, bRoot)
	require.Equal(t, true, ok)
	require.Equal(t, received, emptyIndices)

	item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
	require.NoError(t, cache.AddProposerIndices(item))

	received, err = cache.ProposerIndices(item.BlockRoot)
	require.NoError(t, err)
	assert.DeepEqual(t, item.ProposerIndices, received)
	has, err = cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, true, has)
	newRoot := [32]byte{'B'}
	copy(emptyIndices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
	cache.Set(0, newRoot, emptyIndices)

	received, ok = cache.ProposerIndices(0, newRoot)
	require.Equal(t, true, ok)
	require.Equal(t, emptyIndices, received)
}

func TestProposerCache_CanRotate(t *testing.T) {
func TestProposerCache_CheckpointAndPrune(t *testing.T) {
	cache := NewProposerIndicesCache()
	for i := 0; i < int(maxProposerIndicesCacheSize)+1; i++ {
		s := []byte(strconv.Itoa(i))
		item := &ProposerIndices{BlockRoot: bytesutil.ToBytes32(s)}
		require.NoError(t, cache.AddProposerIndices(item))
	indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
	root := [32]byte{'a'}
	cpRoot := [32]byte{'b'}
	copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
	for i := 1; i < 10; i++ {
		cache.Set(primitives.Epoch(i), root, indices)
		cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
	}
	assert.Equal(t, int(maxProposerIndicesCacheSize), cache.Len())
	received, ok := cache.ProposerIndices(1, root)
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.ProposerIndices(4, root)
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.ProposerIndices(9, root)
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	cache.Prune(5)

	emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
	received, ok = cache.ProposerIndices(1, root)
	require.Equal(t, false, ok)
	require.Equal(t, emptyIndices, received)

	received, ok = cache.ProposerIndices(4, root)
	require.Equal(t, false, ok)
	require.Equal(t, emptyIndices, received)

	received, ok = cache.ProposerIndices(9, root)
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
	require.Equal(t, false, ok)
	require.Equal(t, emptyIndices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
	require.Equal(t, false, ok)
	require.Equal(t, emptyIndices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)

	received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
	require.Equal(t, true, ok)
	require.Equal(t, indices, received)
}
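The rewritten tests above capture the cache's new shape: entries are keyed by (epoch, root), values are fixed-size `[fieldparams.SlotsPerEpoch]` arrays, and a miss returns the zero array with `ok == false`. Below is a minimal round-trip sketch, assuming only the `Set`/`ProposerIndices` signatures exercised in the tests:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

func main() {
	// Assumes the epoch-keyed API shown in the diff above.
	c := cache.NewProposerIndicesCache()
	root := [32]byte{'A'}

	var indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
	indices[0] = 42 // proposer for the first slot of the epoch

	c.Set(1, root, indices) // key: (epoch, state root at the end of epoch-1)

	if got, ok := c.ProposerIndices(1, root); ok {
		fmt.Println("proposer for slot 0 of epoch 1:", got[0])
	}
}
```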
beacon-chain/cache/skip_slot_cache.go (vendored): 5 changes

@@ -15,6 +15,11 @@ import (
)

var (
	// Delay parameters
	minDelay    = float64(10)        // 10 nanoseconds
	maxDelay    = float64(100000000) // 0.1 second
	delayFactor = 1.1

	// Metrics
	skipSlotCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "skip_slot_cache_hit",
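The new `minDelay`/`maxDelay`/`delayFactor` variables parameterize an exponential backoff used while a caller waits for another goroutine that is already computing the same skip-slot state. The sketch below is one plausible reading of how those parameters combine; the actual wait loop lives elsewhere in skip_slot_cache.go and may differ in detail:

```go
package main

import "time"

var (
	minDelay    = float64(10)        // 10 nanoseconds
	maxDelay    = float64(100000000) // 0.1 second
	delayFactor = 1.1
)

// waitWithBackoff polls done() with exponentially growing sleeps, capped at
// maxDelay. This is an assumed reconstruction of how the skip-slot cache
// consumes the parameters above, not the verbatim wait loop.
func waitWithBackoff(done func() bool) {
	delay := minDelay
	for !done() {
		time.Sleep(time.Duration(delay) * time.Nanosecond)
		delay *= delayFactor
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

func main() {
	n := 0
	waitWithBackoff(func() bool { n++; return n > 5 })
}
```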
beacon-chain/cache/subnet_ids.go (vendored): 10 changes

@@ -24,6 +24,8 @@ type subnetIDs struct {
// SubnetIDs for attester and aggregator.
var SubnetIDs = newSubnetIDs()

var subnetKey = "persistent-subnets"

func newSubnetIDs() *subnetIDs {
	// Given a node can calculate committee assignments of current epoch and next epoch.
	// Max size is set to 2 epoch length.

@@ -91,11 +93,11 @@ func (s *subnetIDs) GetAggregatorSubnetIDs(slot primitives.Slot) []uint64 {

// GetPersistentSubnets retrieves the persistent subnet and expiration time of that validator's
// subscription.
func (s *subnetIDs) GetPersistentSubnets(pubkey []byte) ([]uint64, bool, time.Time) {
func (s *subnetIDs) GetPersistentSubnets() ([]uint64, bool, time.Time) {
	s.subnetsLock.RLock()
	defer s.subnetsLock.RUnlock()

	id, duration, ok := s.persistentSubnets.GetWithExpiration(string(pubkey))
	id, duration, ok := s.persistentSubnets.GetWithExpiration(subnetKey)
	if !ok {
		return []uint64{}, ok, time.Time{}
	}

@@ -122,11 +124,11 @@ func (s *subnetIDs) GetAllSubnets() []uint64 {

// AddPersistentCommittee adds the relevant committee for that particular validator along with its
// expiration period.
func (s *subnetIDs) AddPersistentCommittee(pubkey []byte, comIndex []uint64, duration time.Duration) {
func (s *subnetIDs) AddPersistentCommittee(comIndex []uint64, duration time.Duration) {
	s.subnetsLock.Lock()
	defer s.subnetsLock.Unlock()

	s.persistentSubnets.Set(string(pubkey), comIndex, duration)
	s.persistentSubnets.Set(subnetKey, comIndex, duration)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
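The net effect of these hunks is that persistent subnet subscriptions are now stored once per node under the fixed `subnetKey` rather than once per validator pubkey, which is why both methods drop their `pubkey` parameter. A short usage sketch against the new signatures:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
)

func main() {
	// One shared persistent-subnet entry for the whole node.
	cache.SubnetIDs.AddPersistentCommittee([]uint64{0, 1, 2, 7, 8}, 5*time.Minute)

	subnets, ok, expiry := cache.SubnetIDs.GetPersistentSubnets()
	if ok {
		fmt.Printf("subscribed to subnets %v until %s\n", subnets, expiry)
	}
}
```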
beacon-chain/cache/subnet_ids_test.go (vendored): 19 changes

@@ -3,10 +3,8 @@ package cache
import (
	"testing"

	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestSubnetIDsCache_RoundTrip(t *testing.T) {

@@ -46,21 +44,8 @@ func TestSubnetIDsCache_RoundTrip(t *testing.T) {
func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
	c := newSubnetIDs()

	for i := 0; i < 20; i++ {
		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
		c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
	}
	c.AddPersistentCommittee([]uint64{0, 1, 2, 7, 8}, 0)

	for i := uint64(0); i < 20; i++ {
		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

		idxs, ok, _ := c.GetPersistentSubnets(pubkey[:])
		if !ok {
			t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
			continue
		}
		require.Equal(t, i, idxs[0])
	}
	coms := c.GetAllSubnets()
	assert.Equal(t, 20, len(coms))
	assert.Equal(t, 5, len(coms))
}
beacon-chain/cache/tracked_validators.go (vendored, new file): 43 changes

@@ -0,0 +1,43 @@
package cache

import (
	"sync"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

type TrackedValidator struct {
	Active       bool
	FeeRecipient primitives.ExecutionAddress
	Index        primitives.ValidatorIndex
}

type TrackedValidatorsCache struct {
	sync.Mutex
	trackedValidators map[primitives.ValidatorIndex]TrackedValidator
}

func NewTrackedValidatorsCache() *TrackedValidatorsCache {
	return &TrackedValidatorsCache{
		trackedValidators: make(map[primitives.ValidatorIndex]TrackedValidator),
	}
}

func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
	t.Lock()
	defer t.Unlock()
	val, ok := t.trackedValidators[index]
	return val, ok
}

func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
	t.Lock()
	defer t.Unlock()
	t.trackedValidators[val.Index] = val
}

func (t *TrackedValidatorsCache) Prune() {
	t.Lock()
	defer t.Unlock()
	t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
}
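The new file is a small mutex-guarded map from validator index to tracked metadata (activity flag, fee recipient). A usage sketch, relying only on the API shown above:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

func main() {
	tvc := cache.NewTrackedValidatorsCache()
	tvc.Set(cache.TrackedValidator{
		Active: true,
		Index:  primitives.ValidatorIndex(7),
		// FeeRecipient would normally come from a validator registration.
	})

	if val, ok := tvc.Validator(7); ok {
		fmt.Println("tracking validator", val.Index, "active:", val.Active)
	}

	tvc.Prune() // drops all entries, e.g. between registration refreshes
}
```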
@@ -24,12 +24,12 @@ import (
func ProcessAttestationsNoVerifySignature(
	ctx context.Context,
	beaconState state.BeaconState,
	b interfaces.ReadOnlySignedBeaconBlock,
	b interfaces.ReadOnlyBeaconBlock,
) (state.BeaconState, error) {
	if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
		return nil, err
	if b == nil || b.IsNil() {
		return nil, consensusblocks.ErrNilBeaconBlock
	}
	body := b.Block().Body()
	body := b.Body()
	totalBalance, err := helpers.TotalActiveBalance(beaconState)
	if err != nil {
		return nil, err
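The signature change ripples through every caller: instead of handing the signed wrapper to the transition function, callers unwrap it once with `Block()` and pass the read-only inner block, as the test hunks below show. A condensed sketch of the updated call shape (the wrapper function and package name are illustrative):

```go
package transitionutil // hypothetical package name, for illustration only

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
)

// processAtts shows the updated call shape: the signed block is unwrapped
// once with Block(), and the read-only inner block is what flows onward.
func processAtts(ctx context.Context, st state.BeaconState, wsb interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	// Before this diff the whole signed wrapper was passed:
	//   altair.ProcessAttestationsNoVerifySignature(ctx, st, wsb)
	return altair.ProcessAttestationsNoVerifySignature(ctx, st, wsb.Block())
}
```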
@@ -50,7 +50,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
	)
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
}

@@ -81,7 +81,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
	)
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
}

@@ -110,13 +110,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
	want := "source check point not equal to current justified checkpoint"
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
	b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
	b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
	wsb, err = blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
}

@@ -151,14 +151,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
	want := "source check point not equal to previous justified checkpoint"
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
	b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
	b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
	b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
	wsb, err = blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, want, err)
}

@@ -190,7 +190,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
	expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.ErrorContains(t, expected, err)
}

@@ -234,7 +234,7 @@ func TestProcessAttestations_OK(t *testing.T) {
	require.NoError(t, err)
	wsb, err := blocks.NewSignedBeaconBlock(block)
	require.NoError(t, err)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb)
	_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
	require.NoError(t, err)
}

@@ -426,7 +426,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
	}
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
	r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb.Block())
	if err != nil && r != nil {
		t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, s, b)
	}
@@ -24,7 +24,7 @@ type AttDelta struct {

// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
	ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
	_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
	defer span.End()
	vals := make([]*precompute.Validator, beaconState.NumValidators())
	bal := &precompute.Balance{}

@@ -86,7 +86,7 @@ func ProcessInactivityScores(
	beaconState state.BeaconState,
	vals []*precompute.Validator,
) (state.BeaconState, []*precompute.Validator, error) {
	ctx, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
	_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
	defer span.End()

	cfg := params.BeaconConfig()

@@ -155,7 +155,7 @@ func ProcessEpochParticipation(
	bal *precompute.Balance,
	vals []*precompute.Validator,
) ([]*precompute.Validator, *precompute.Balance, error) {
	ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
	_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
	defer span.End()

	cp, err := beaconState.CurrentEpochParticipation()
@@ -276,7 +276,7 @@ func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
}

func Test_ValidateSyncMessageTime(t *testing.T) {
	if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
	if params.BeaconConfig().MaximumGossipClockDisparityDuration() < 200*time.Millisecond {
		t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
	}

@@ -326,7 +326,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
			name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
			args: args{
				syncMessageSlot: 100,
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconConfig().MaximumGossipClockDisparityDuration())),
			},
			wantedErr: "",
		},
@@ -334,7 +334,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
			name: "sync_message.slot == current_slot+CLOCK_DISPARITY-1000ms",
			args: args{
				syncMessageSlot: 100,
				genesisTime:     prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
				genesisTime:     prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconConfig().MaximumGossipClockDisparityDuration() + 1000*time.Millisecond),
			},
			wantedErr: "(message slot 100) not within allowable range of",
		},
@@ -342,7 +342,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
			name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
			args: args{
				syncMessageSlot: 100,
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconConfig().MaximumGossipClockDisparityDuration())),
			},
			wantedErr: "",
		},
@@ -350,7 +350,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
			name: "sync_message.slot > current_slot+CLOCK_DISPARITY",
			args: args{
				syncMessageSlot: 101,
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
				genesisTime:     prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconConfig().MaximumGossipClockDisparityDuration())),
			},
			wantedErr: "(message slot 101) not within allowable range of",
		},
@@ -366,7 +366,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := altair.ValidateSyncMessageTime(tt.args.syncMessageSlot, tt.args.genesisTime,
				params.BeaconNetworkConfig().MaximumGossipClockDisparity)
				params.BeaconConfig().MaximumGossipClockDisparityDuration())
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
			} else {
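All of these hunks perform the same mechanical migration: the clock-disparity tolerance moves from `params.BeaconNetworkConfig().MaximumGossipClockDisparity` to the `MaximumGossipClockDisparityDuration()` accessor on the beacon config; the value itself (a `time.Duration` tolerance for peer clock skew) is unchanged. A one-liner showing the new lookup:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/config/params"
)

func main() {
	// Previously: params.BeaconNetworkConfig().MaximumGossipClockDisparity
	tolerance := params.BeaconConfig().MaximumGossipClockDisparityDuration()
	fmt.Println("gossip clock disparity tolerance:", tolerance)
}
```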
@@ -25,12 +25,12 @@ import (
func ProcessAttestationsNoVerifySignature(
	ctx context.Context,
	beaconState state.BeaconState,
	b interfaces.ReadOnlySignedBeaconBlock,
	b interfaces.ReadOnlyBeaconBlock,
) (state.BeaconState, error) {
	if err := blocks.BeaconBlockIsNil(b); err != nil {
		return nil, err
	if b == nil || b.IsNil() {
		return nil, blocks.ErrNilBeaconBlock
	}
	body := b.Block().Body()
	body := b.Body()
	var err error
	for idx, att := range body.Attestations() {
		beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, att)
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
	}
	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	r, err := ProcessAttestationsNoVerifySignature(ctx, s, wsb)
	r, err := ProcessAttestationsNoVerifySignature(ctx, s, wsb.Block())
	if err != nil && r != nil {
		t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
	}
@@ -76,7 +76,7 @@ func ProcessVoluntaryExits(
			} else if exitEpoch == maxExitEpoch {
				churn++
			}
		} else if !errors.Is(err, v.ValidatorAlreadyExitedErr) {
		} else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
			return nil, err
		}
	}
@@ -28,11 +28,11 @@ const executionToBLSPadding = 12
// signature set.
func ProcessBLSToExecutionChanges(
	st state.BeaconState,
	signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	if signed.Version() < version.Capella {
	b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	if b.Version() < version.Capella {
		return st, nil
	}
	changes, err := signed.Block().Body().BLSToExecutionChanges()
	changes, err := b.Body().BLSToExecutionChanges()
	if err != nil {
		return nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
	}
@@ -1134,12 +1134,9 @@ func TestProcessBLSToExecutionChanges(t *testing.T) {
			bpb := &ethpb.BeaconBlockCapella{
				Body: body,
			}
			sbpb := &ethpb.SignedBeaconBlockCapella{
				Block: bpb,
			}
			signed, err := consensusblocks.NewSignedBeaconBlock(sbpb)
			bb, err := consensusblocks.NewBeaconBlock(bpb)
			require.NoError(t, err)
			st, err = blocks.ProcessBLSToExecutionChanges(st, signed)
			st, err = blocks.ProcessBLSToExecutionChanges(st, bb)
			require.NoError(t, err)
			vals := st.Validators()
			for _, val := range vals {
@@ -114,7 +114,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
		// barely happen
		maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
		state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
		if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
		if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
			return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
		}
	}
@@ -18,7 +18,7 @@ import (
// pre computed instances of validators attesting records and total
// balances attested in an epoch.
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
	ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
	_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
	defer span.End()

	pValidators := make([]*Validator, s.NumValidators())
@@ -10,6 +10,7 @@ go_library(
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//async/event:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
    ],
)
@@ -2,6 +2,7 @@
package operation

import (
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -58,5 +59,5 @@ type BLSToExecutionChangeReceivedData struct {

// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
	Blob *ethpb.SignedBlobSidecar
	Blob *blocks.VerifiedROBlob
}
@@ -20,6 +20,7 @@ go_library(
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -113,7 +113,7 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
	slotSinceStart := slots.SinceEpochStarts(attSlot)
	comCount := SlotCommitteeCount(activeValCount)
	commsSinceStart := uint64(slotSinceStart.Mul(comCount))
	computedSubnet := (commsSinceStart + uint64(comIdx)) % params.BeaconNetworkConfig().AttestationSubnetCount
	computedSubnet := (commsSinceStart + uint64(comIdx)) % params.BeaconConfig().AttestationSubnetCount
	return computedSubnet
}
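The subnet computation itself is unchanged: committees are numbered consecutively from the start of the epoch and folded onto the fixed subnet set by a modulus; only the home of `AttestationSubnetCount` moves. A self-contained restatement of the arithmetic (the 64-subnet figure is the mainnet value, used here only as an example):

```go
package main

import "fmt"

// computeSubnet mirrors the arithmetic in ComputeSubnetFromCommitteeAndSlot:
// committees are indexed consecutively from the epoch start, then reduced
// modulo the attestation subnet count.
func computeSubnet(slotsSinceEpochStart, committeesPerSlot, committeeIdx, subnetCount uint64) uint64 {
	commsSinceStart := slotsSinceEpochStart * committeesPerSlot
	return (commsSinceStart + committeeIdx) % subnetCount
}

func main() {
	// Slot 3 of the epoch, 4 committees per slot, committee index 2,
	// 64 subnets: (3*4 + 2) % 64 = 14.
	fmt.Println(computeSubnet(3, 4, 2, 64))
}
```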
@@ -151,8 +151,8 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
	// An attestation cannot be older than the current slot - attestation propagation slot range
	// with a minor tolerance for peer clock disparity.
	lowerBoundsSlot := primitives.Slot(0)
	if currentSlot > params.BeaconNetworkConfig().AttestationPropagationSlotRange {
		lowerBoundsSlot = currentSlot - params.BeaconNetworkConfig().AttestationPropagationSlotRange
	if currentSlot > params.BeaconConfig().AttestationPropagationSlotRange {
		lowerBoundsSlot = currentSlot - params.BeaconConfig().AttestationPropagationSlotRange
	}
	lowerTime, err := slots.ToTime(uint64(genesisTime.Unix()), lowerBoundsSlot)
	if err != nil {
@@ -90,7 +90,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
	params.OverrideBeaconConfig(cfg)
	params.SetupTestConfigCleanup(t)

	if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
	if params.BeaconConfig().MaximumGossipClockDisparityDuration() < 200*time.Millisecond {
		t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
	}

@@ -139,7 +139,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
		{
			name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE",
			args: args{
				attSlot:     100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
				attSlot:     100 - params.BeaconConfig().AttestationPropagationSlotRange - 1,
				genesisTime: prysmTime.Now().Add(-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
			},
			wantedErr: "not within attestation propagation range",
@@ -147,14 +147,14 @@ func Test_ValidateAttestationTime(t *testing.T) {
		{
			name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE",
			args: args{
				attSlot:     100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
				attSlot:     100 - params.BeaconConfig().AttestationPropagationSlotRange,
				genesisTime: prysmTime.Now().Add(-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
			},
		},
		{
			name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late",
			args: args{
				attSlot:     100 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
				attSlot:     100 - params.BeaconConfig().AttestationPropagationSlotRange,
				genesisTime: prysmTime.Now().Add(
					-100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
				).Add(200 * time.Millisecond),
@@ -163,21 +163,21 @@ func Test_ValidateAttestationTime(t *testing.T) {
		{
			name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
			args: args{
				attSlot:     300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
				attSlot:     300 - params.BeaconConfig().AttestationPropagationSlotRange - 1,
				genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
			},
		},
		{
			name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
			args: args{
				attSlot:     300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
				attSlot:     300 - params.BeaconConfig().AttestationPropagationSlotRange,
				genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
			},
		},
		{
			name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late in deneb",
			args: args{
				attSlot:     300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
				attSlot:     300 - params.BeaconConfig().AttestationPropagationSlotRange,
				genesisTime: prysmTime.Now().Add(
					-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
				).Add(200 * time.Millisecond),
@@ -186,7 +186,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
		{
			name: "attestation.slot != current epoch or previous epoch in deneb",
			args: args{
				attSlot:     300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
				attSlot:     300 - params.BeaconConfig().AttestationPropagationSlotRange,
				genesisTime: prysmTime.Now().Add(
					-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
				).Add(200 * time.Millisecond),
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := helpers.ValidateAttestationTime(tt.args.attSlot, tt.args.genesisTime,
				params.BeaconNetworkConfig().MaximumGossipClockDisparity)
				params.BeaconConfig().MaximumGossipClockDisparityDuration())
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
			} else {
@@ -3,7 +3,6 @@
package helpers

import (
	"bytes"
	"context"
	"fmt"
	"sort"
@@ -12,7 +11,9 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/container/slice"
@@ -333,36 +334,27 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,

// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
// Input state is used to retrieve active validator indices.
// Input root is to use as key in the cache.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
	// The cache uses the state root at the (current epoch - 1)'s slot as key. (e.g. for epoch 2, the key is root at slot 63)
	// Which is the reason why we skip genesis epoch.
	// The cache uses the state root at the end of (current epoch - 1) as key.
	// (e.g. for epoch 2, the key is root at slot 63)
	if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
		return nil
	}

	// Use state root from (current_epoch - 1))
	s, err := slots.EpochEnd(epoch - 1)
	slot, err := slots.EpochEnd(epoch - 1)
	if err != nil {
		return err
	}
	r, err := state.StateRootAtIndex(uint64(s % params.BeaconConfig().SlotsPerHistoricalRoot))
	root, err := StateRootAtSlot(state, slot)
	if err != nil {
		return err
	}
	// Skip cache update if we have an invalid key
	if r == nil || bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
		return nil
	}
	// Skip cache update if the key already exists
	exists, err := proposerIndicesCache.HasProposerIndices(bytesutil.ToBytes32(r))
	if err != nil {
		return err
	}
	if exists {
	_, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
	if ok {
		return nil
	}

	indices, err := ActiveValidatorIndices(ctx, state, epoch)
	if err != nil {
		return err
@@ -371,16 +363,38 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
	if err != nil {
		return err
	}
	return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
		BlockRoot:       bytesutil.ToBytes32(r),
		ProposerIndices: proposerIndices,
	})
	if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
		return errors.New("invalid proposer length returned from state")
	}
	// This is here to deal with tests only
	var indicesArray [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
	copy(indicesArray[:], proposerIndices)
	proposerIndicesCache.Prune(epoch - 2)
	proposerIndicesCache.Set(epoch, [32]byte(root), indicesArray)
	return nil
}

// UpdateCachedCheckpointToStateRoot updates the map from checkpoints to state root in the proposer indices cache
func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *forkchoicetypes.Checkpoint) error {
	if cp.Epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
		return nil
	}
	slot, err := slots.EpochEnd(cp.Epoch - 1)
	if err != nil {
		return err
	}
	root, err := state.StateRootAtIndex(uint64(slot % params.BeaconConfig().SlotsPerHistoricalRoot))
	if err != nil {
		return err
	}
	proposerIndicesCache.SetCheckpoint(*cp, [32]byte(root))
	return nil
}

// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
	committeeCache.Clear()
	proposerIndicesCache.Clear()
	proposerIndicesCache.Prune(0)
	syncCommitteeCache.Clear()
	balanceCache.Clear()
}
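The rewritten updater derives its cache key from the state root at the last slot of `epoch - 1`, stores a fixed-size array, and prunes anything older than `epoch - 2`. Below is a sketch of just the key derivation; `errEpochTooEarly` is a hypothetical sentinel standing in for the real code's silent `return nil`:

```go
package helpers // sketch: assumes it sits next to the functions above

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

var errEpochTooEarly = errors.New("epoch too close to genesis to cache") // hypothetical

// cacheKeySlot returns the slot whose state root keys the proposer-indices
// cache for an epoch: the final slot of the previous epoch (for epoch 2 with
// 32-slot epochs, slot 63).
func cacheKeySlot(epoch primitives.Epoch) (primitives.Slot, error) {
	if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
		return 0, errEpochTooEarly
	}
	return slots.EpochEnd(epoch - 1)
}
```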
@@ -20,10 +20,14 @@ import (
	"go.opencensus.io/trace"
)

var CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{
	Name: "committee_cache_in_progress_hit",
	Help: "The number of committee requests that are present in the cache.",
})
var (
	CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "committee_cache_in_progress_hit",
		Help: "The number of committee requests that are present in the cache.",
	})

	errProposerIndexMiss = errors.New("proposer index not found in cache")
)

// IsActiveValidator returns the boolean value on whether the validator
// is active or not.
@@ -259,12 +263,32 @@ func ValidatorActivationChurnLimitDeneb(activeValidatorCount uint64) uint64 {
//	indices = get_active_validator_indices(state, epoch)
//	return compute_proposer_index(state, indices, seed)
func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (primitives.ValidatorIndex, error) {
	e := time.CurrentEpoch(state)
	return BeaconProposerIndexAtSlot(ctx, state, state.Slot())
}

// cachedProposerIndexAtSlot returns the proposer index at the given slot from
// the cache at the given root key.
func cachedProposerIndexAtSlot(slot primitives.Slot, root [32]byte) (primitives.ValidatorIndex, error) {
	proposerIndices, has := proposerIndicesCache.ProposerIndices(slots.ToEpoch(slot), root)
	if !has {
		cache.ProposerIndicesCacheMiss.Inc()
		return 0, errProposerIndexMiss
	}
	if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
		cache.ProposerIndicesCacheMiss.Inc()
		return 0, errProposerIndexMiss
	}
	return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
}

// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
	e := slots.ToEpoch(slot)
	// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
	// For simplicity, the node will skip caching of genesis epoch.
	if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
		wantedEpoch := time.PrevEpoch(state)
		s, err := slots.EpochEnd(wantedEpoch)
		s, err := slots.EpochEnd(e - 1)
		if err != nil {
			return 0, err
		}
@@ -273,18 +297,16 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
			return 0, err
		}
		if r != nil && !bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
			proposerIndices, err := proposerIndicesCache.ProposerIndices(bytesutil.ToBytes32(r))
			if err != nil {
				return 0, errors.Wrap(err, "could not interface with committee cache")
			pid, err := cachedProposerIndexAtSlot(slot, [32]byte(r))
			if err == nil {
				return pid, nil
			}
			if proposerIndices != nil {
				if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
					return 0, errors.Errorf("length of proposer indices is not equal %d to slots per epoch", len(proposerIndices))
				}
				return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
			if err := UpdateProposerIndicesInCache(ctx, state, e); err != nil {
				return 0, errors.Wrap(err, "could not update proposer index cache")
			}
			if err := UpdateProposerIndicesInCache(ctx, state, time.CurrentEpoch(state)); err != nil {
				return 0, errors.Wrap(err, "could not update committee cache")
			pid, err = cachedProposerIndexAtSlot(slot, [32]byte(r))
			if err == nil {
				return pid, nil
			}
		}
	}
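The lookup now follows a try/repopulate/retry pattern before falling back to the full seed-based shuffle. The following is a condensed restatement of that fast path, not the verbatim function:

```go
package helpers // sketch: relies on the unexported helpers defined above

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// proposerAt restates the cache fast path of BeaconProposerIndexAtSlot in
// condensed form. A nil error from cachedProposerIndexAtSlot means a usable
// cache hit; a final miss is left for the caller's full recomputation.
func proposerAt(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, root [32]byte) (primitives.ValidatorIndex, error) {
	if pid, err := cachedProposerIndexAtSlot(slot, root); err == nil {
		return pid, nil
	}
	// Miss: recompute this epoch's indices into the cache, then retry once.
	if err := UpdateProposerIndicesInCache(ctx, st, slots.ToEpoch(slot)); err != nil {
		return 0, errors.Wrap(err, "could not update proposer index cache")
	}
	return cachedProposerIndexAtSlot(slot, root)
}
```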
@@ -294,7 +316,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
		return 0, errors.Wrap(err, "could not generate seed")
	}

	seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(state.Slot()))...)
	seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(slot))...)
	seedWithSlotHash := hash.Hash(seedWithSlot)

	indices, err := ActiveValidatorIndices(ctx, state, e)
@@ -264,7 +264,6 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
	require.NoError(t, state.SetSlot(100))
	_, err = BeaconProposerIndex(context.Background(), state)
	require.NoError(t, err)
	assert.Equal(t, 0, proposerIndicesCache.Len())
}

func TestComputeProposerIndex_Compatibility(t *testing.T) {
@@ -324,11 +324,11 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
}

// VerifyOperationLengths verifies that block operation lengths are valid.
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	if err := blocks.BeaconBlockIsNil(b); err != nil {
		return nil, err
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	if b == nil || b.IsNil() {
		return nil, blocks.ErrNilBeaconBlock
	}
	body := b.Block().Body()
	body := b.Body()

	if uint64(len(body.ProposerSlashings())) > params.BeaconConfig().MaxProposerSlashings {
		return nil, fmt.Errorf(
@@ -103,18 +103,18 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
	ctx := context.Background()
	state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
	bb := &ethpb.SignedBeaconBlock{}
	bb := &ethpb.BeaconBlock{}
	fuzzer := fuzz.NewWithSeed(0)
	fuzzer.NilChance(0.1)
	for i := 0; i < 1000; i++ {
		fuzzer.Fuzz(state)
		fuzzer.Fuzz(bb)
		if bb.Block == nil || bb.Block.Body == nil {
		if bb.Body == nil {
			continue
		}
		wsb, err := blocks.NewSignedBeaconBlock(bb)
		wb, err := blocks.NewBeaconBlock(bb)
		require.NoError(t, err)
		s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wsb)
		s, err := ProcessOperationsNoVerifyAttsSigs(ctx, state, wb)
		if err != nil && s != nil {
			t.Fatalf("state should be nil on err. found: %v on error: %v for block body: %v", s, err, bb)
		}
@@ -126,18 +126,18 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
	defer SkipSlotCache.Enable()
	state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
	bb := &ethpb.SignedBeaconBlock{}
	bb := &ethpb.BeaconBlock{}
	fuzzer := fuzz.NewWithSeed(0)
	fuzzer.NilChance(0.1)
	for i := 0; i < 10000; i++ {
		fuzzer.Fuzz(state)
		fuzzer.Fuzz(bb)
		if bb.Block == nil || bb.Block.Body == nil {
		if bb.Body == nil {
			continue
		}
		wsb, err := blocks.NewSignedBeaconBlock(bb)
		wb, err := blocks.NewBeaconBlock(bb)
		require.NoError(t, err)
		_, err = VerifyOperationLengths(context.Background(), state, wsb)
		_, err = VerifyOperationLengths(context.Background(), state, wb)
		_ = err
	}
}
@@ -239,26 +239,26 @@ func ProcessBlockNoVerifyAnySig(
func ProcessOperationsNoVerifyAttsSigs(
	ctx context.Context,
	state state.BeaconState,
	signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "core.state.ProcessOperationsNoVerifyAttsSigs")
	defer span.End()
	if err := blocks.BeaconBlockIsNil(signedBeaconBlock); err != nil {
		return nil, err
	if beaconBlock == nil || beaconBlock.IsNil() {
		return nil, blocks.ErrNilBeaconBlock
	}

	if _, err := VerifyOperationLengths(ctx, state, signedBeaconBlock); err != nil {
	if _, err := VerifyOperationLengths(ctx, state, beaconBlock); err != nil {
		return nil, errors.Wrap(err, "could not verify operation lengths")
	}

	var err error
	switch signedBeaconBlock.Version() {
	switch beaconBlock.Version() {
	case version.Phase0:
		state, err = phase0Operations(ctx, state, signedBeaconBlock)
		state, err = phase0Operations(ctx, state, beaconBlock)
		if err != nil {
			return nil, err
		}
	case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
		state, err = altairOperations(ctx, state, signedBeaconBlock)
		state, err = altairOperations(ctx, state, beaconBlock)
		if err != nil {
			return nil, err
		}
@@ -342,7 +342,7 @@ func ProcessBlockForStateRoot(
		return nil, errors.Wrap(err, "could not process eth1 data")
	}

	state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed)
	state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, errors.Wrap(err, "could not process block operation")
@@ -382,48 +382,48 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
func altairOperations(
	ctx context.Context,
	st state.BeaconState,
	signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
	beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair proposer slashing")
	}
	st, err = b.ProcessAttesterSlashings(ctx, st, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
	st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair attester slashing")
	}
	st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, signedBeaconBlock)
	st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
	if err != nil {
		return nil, errors.Wrap(err, "could not process altair attestation")
	}
	if _, err := altair.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
	if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
		return nil, errors.Wrap(err, "could not process altair deposit")
	}
	st, err = b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
	st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
	if err != nil {
		return nil, errors.Wrap(err, "could not process voluntary exits")
	}
	return b.ProcessBLSToExecutionChanges(st, signedBeaconBlock)
	return b.ProcessBLSToExecutionChanges(st, beaconBlock)
}

// This calls phase 0 block operations.
func phase0Operations(
	ctx context.Context,
	st state.BeaconState,
	signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
	st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
	beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process block proposer slashings")
	}
	st, err = b.ProcessAttesterSlashings(ctx, st, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
	st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
	if err != nil {
		return nil, errors.Wrap(err, "could not process block attester slashings")
	}
	st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, signedBeaconBlock)
	st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
	if err != nil {
		return nil, errors.Wrap(err, "could not process block attestations")
	}
	if _, err := b.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
	if _, err := b.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
		return nil, errors.Wrap(err, "could not process deposits")
	}
	return b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
	return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
}
@@ -171,7 +171,7 @@ func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
	require.NoError(t, err)
	beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
	require.NoError(t, err)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block())
	require.NoError(t, err)
}

@@ -181,7 +181,7 @@ func TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) {
	require.NoError(t, err)
	beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
	require.NoError(t, err)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block())
	require.NoError(t, err)
}

@@ -191,7 +191,7 @@ func TestProcessOperationsNoVerifyAttsSigsCapella_OK(t *testing.T) {
	require.NoError(t, err)
	beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot())
	require.NoError(t, err)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb)
	_, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block())
	require.NoError(t, err)
}

@@ -191,7 +191,7 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
	require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
	wsb, err := consensusblocks.NewSignedBeaconBlock(block)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), beaconState, wsb.Block())
	wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16"
	assert.ErrorContains(t, wanted, err)
}

@@ -414,7 +414,7 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) {
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
	assert.ErrorContains(t, want, err)
}

@@ -433,7 +433,7 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
	assert.ErrorContains(t, want, err)
}

@@ -451,7 +451,7 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) {
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
	assert.ErrorContains(t, want, err)
}

@@ -470,7 +470,7 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
	assert.ErrorContains(t, want, err)
}

@@ -492,7 +492,7 @@ func TestProcessBlock_IncorrectDeposits(t *testing.T) {
		s.Eth1Data().DepositCount-s.Eth1DepositIndex(), len(b.Block.Body.Deposits))
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb)
	_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
	assert.ErrorContains(t, want, err)
}
@@ -17,9 +17,9 @@ import (
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// ValidatorAlreadyExitedErr is an error raised when trying to process an exit of
// ErrValidatorAlreadyExited is an error raised when trying to process an exit of
// an already exited validator
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
var ErrValidatorAlreadyExited = errors.New("validator already exited")

// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them

@@ -76,7 +76,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
		return nil, 0, err
	}
	if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
		return s, validator.ExitEpoch, ValidatorAlreadyExitedErr
		return s, validator.ExitEpoch, ErrValidatorAlreadyExited
	}
	activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
	if err != nil {

@@ -136,7 +136,7 @@ func SlashValidator(
	proposerRewardQuotient uint64) (state.BeaconState, error) {
	maxExitEpoch, churn := MaxExitEpochAndChurn(s)
	s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
	if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
	if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
		return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
	}
	currentEpoch := slots.ToEpoch(s.Slot())

@@ -49,7 +49,7 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
	state, err := state_native.InitializeFromProtoPhase0(base)
	require.NoError(t, err)
	newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
	require.ErrorIs(t, err, ValidatorAlreadyExitedErr)
	require.ErrorIs(t, err, ErrValidatorAlreadyExited)
	require.Equal(t, exitEpoch, epoch)
	v, err := newState.ValidatorAtIndex(0)
	require.NoError(t, err)
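The rename aligns the sentinel with Go's `Err` prefix convention; matching still goes through `errors.Is`, which keeps working across `errors.Wrap`. A minimal sketch of the call-site pattern used throughout this diff:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
	v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
)

// exitOrIgnore tolerates the already-exited case while surfacing real
// failures, the same pattern the updated call sites use.
func exitOrIgnore(err error) error {
	if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
		return errors.Wrap(err, "could not initiate validator exit")
	}
	return nil
}

func main() {
	wrapped := errors.Wrap(v.ErrValidatorAlreadyExited, "exit attempt")
	fmt.Println(exitOrIgnore(wrapped)) // <nil>: wrapping does not defeat errors.Is
}
```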
@@ -5,6 +5,8 @@ go_library(
    srcs = [
        "blob.go",
        "ephemeral.go",
        "metrics.go",
        "pruner.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
@@ -18,8 +20,9 @@ go_library(
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
@@ -37,7 +40,7 @@ go_test(
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
@@ -1,49 +1,48 @@
package filesystem

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/io/file"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/runtime/logging"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/afero"
)

var (
	errIndexOutOfBounds = errors.New("blob index in file name > MaxBlobsPerBlock")
	errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
)

const (
	sszExt  = "ssz"
	partExt = "part"

	bufferEpochs         = 2
	directoryPermissions = 0700
)

// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage)
type BlobStorageOption func(*BlobStorage) error

// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
	return func(b *BlobStorage) {
		b.retentionEpochs = e
	return func(b *BlobStorage) error {
		pruner, err := newblobPruner(b.fs, e)
		if err != nil {
			return err
		}
		b.pruner = pruner
		return nil
	}
}

@@ -56,21 +55,26 @@ func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error
		return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
	}
	fs := afero.NewBasePathFs(afero.NewOsFs(), base)
	b := &BlobStorage{fs: fs, retentionEpochs: params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest}
	b := &BlobStorage{
		fs: fs,
	}
	for _, o := range opts {
		o(b)
		if err := o(b); err != nil {
			return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
		}
	}
	return b, nil
}

// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
	fs              afero.Fs
	retentionEpochs primitives.Epoch
	fs     afero.Fs
	pruner *blobPruner
}
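Because options now return an error, a misconfigured pruner surfaces at construction time instead of being silently ignored. A usage sketch, assuming the `NewBlobStorage` signature above (the retention value is illustrative):

```go
package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
)

func openBlobStore(base string) *filesystem.BlobStorage {
	// The option now returns an error, which NewBlobStorage propagates.
	bs, err := filesystem.NewBlobStorage(base, filesystem.WithBlobRetentionEpochs(4096))
	if err != nil {
		log.Fatalf("failed to open blob storage: %v", err)
	}
	return bs
}

func main() {
	_ = openBlobStore("/tmp/blobs") // example path
}
```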
// Save saves blobs given a list of sidecars.
|
||||
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
startTime := time.Now()
|
||||
fname := namerForSidecar(sidecar)
|
||||
sszPath := fname.path()
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
@@ -81,6 +85,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
|
||||
return nil
|
||||
}
|
||||
if bs.pruner != nil {
|
||||
bs.pruner.try(sidecar.BlockRoot(), sidecar.Slot())
|
||||
}
|
||||
|
||||
// Serialize the ethpb.BlobSidecar to binary data using SSZ.
|
||||
sidecarData, err := sidecar.MarshalSSZ()
|
||||
@@ -91,6 +98,18 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
return err
|
||||
}
|
||||
partPath := fname.partPath()
|
||||
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
// It's expected to error if the save is successful.
|
||||
err = bs.fs.Remove(partPath)
|
||||
if err == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"partPath": partPath,
|
||||
}).Debugf("removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(partPath)
|
||||
if err != nil {
|
||||
@@ -115,6 +134,8 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to rename partial file to final name")
|
||||
}
|
||||
blobsTotalGauge.Inc()
|
||||
blobSaveLatency.Observe(time.Since(startTime).Seconds())
|
||||
return nil
|
||||
}
|
||||
|
||||
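The Save path above writes the serialized sidecar to a `.part` file and only renames it to its final `.ssz` name after the write completes, so a crash mid-write can never leave a truncated blob behind; the deferred Remove cleans up any orphaned partial file. A hedged sketch of the same write-then-rename idea using only the standard os package (the helper name is illustrative, not Prysm's):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// writeAtomic sketches the ".part then rename" pattern used by Save:
// data lands in a temporary file first, and the final name only appears
// once the contents are fully written, so readers never see a partial file.
func writeAtomic(finalPath string, data []byte) error {
    partPath := finalPath + ".part"
    f, err := os.Create(partPath)
    if err != nil {
        return err
    }
    if _, err := f.Write(data); err != nil {
        f.Close()
        return err
    }
    if err := f.Close(); err != nil {
        return err
    }
    // On POSIX filesystems, rename within a directory is atomic.
    return os.Rename(partPath, finalPath)
}

func main() {
    dir, _ := os.MkdirTemp("", "blobs")
    defer os.RemoveAll(dir)
    err := writeAtomic(filepath.Join(dir, "0.ssz"), []byte("blob bytes"))
    fmt.Println("saved:", err == nil)
}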
@@ -122,6 +143,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
+   startTime := time.Now()
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.path())
    var v blocks.VerifiedROBlob
@@ -136,6 +158,9 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
    if err != nil {
        return blocks.VerifiedROBlob{}, err
    }
+   defer func() {
+       blobFetchLatency.Observe(time.Since(startTime).Seconds())
+   }()
    return verification.BlobSidecarNoop(ro)
}

@@ -186,7 +211,7 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
}

func (p blobNamer) dir() string {
-   return fmt.Sprintf("%#x", p.root)
+   return rootString(p.root)
}

func (p blobNamer) fname(ext string) string {
@@ -201,74 +226,6 @@ func (p blobNamer) path() string {
    return p.fname(sszExt)
}

-// Prune prunes blobs in the base directory based on the retention epoch.
-// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
-// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
-func (bs *BlobStorage) Prune(currentSlot primitives.Slot) error {
-   retentionSlots, err := slots.EpochStart(bs.retentionEpochs + bufferEpochs)
-   if err != nil {
-       return err
-   }
-   if currentSlot < retentionSlots {
-       return nil // Overflow would occur
-   }
-
-   folders, err := afero.ReadDir(bs.fs, ".")
-   if err != nil {
-       return err
-   }
-   for _, folder := range folders {
-       if folder.IsDir() {
-           if err := bs.processFolder(folder, currentSlot, retentionSlots); err != nil {
-               return err
-           }
-       }
-   }
-   return nil
-}
-
-// processFolder will delete the folder of blobs if the blob slot is outside the
-// retention period. We determine the slot by looking at the first blob in the folder.
-func (bs *BlobStorage) processFolder(folder os.FileInfo, currentSlot, retentionSlots primitives.Slot) error {
-   f, err := bs.fs.Open(filepath.Join(folder.Name(), "0."+sszExt))
-   if err != nil {
-       return err
-   }
-   defer func() {
-       if err := f.Close(); err != nil {
-           log.WithError(err).Errorf("Could not close blob file")
-       }
-   }()
-
-   slot, err := slotFromBlob(f)
-   if err != nil {
-       return err
-   }
-   if slot < (currentSlot - retentionSlots) {
-       if err = bs.fs.RemoveAll(folder.Name()); err != nil {
-           return errors.Wrapf(err, "failed to delete blob %s", f.Name())
-       }
-   }
-   return nil
-}
-
-// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes),
-// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields
-// preceding the slot information within SignedBeaconBlockHeader.
-func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
-   b := make([]byte, 8)
-   _, err := at.ReadAt(b, 131176)
-   if err != nil {
-       return 0, err
-   }
-   rawSlot := binary.LittleEndian.Uint64(b)
-   return primitives.Slot(rawSlot), nil
-}
-
-// Delete removes the directory matching the provided block root and all the blobs it contains.
-func (bs *BlobStorage) Delete(root [32]byte) error {
-   if err := bs.fs.RemoveAll(hexutil.Encode(root[:])); err != nil {
-       return fmt.Errorf("failed to delete blobs for root %#x: %w", root, err)
-   }
-   return nil
-}
+func rootString(root [32]byte) string {
+   return fmt.Sprintf("%#x", root)
+}

@@ -2,10 +2,12 @@ package filesystem

import (
    "bytes"
+   "os"
    "path"
    "testing"
+   "time"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -71,6 +73,73 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
        require.NoError(t, err)
        require.DeepSSZEqual(t, expected, actual)
    })
+   t.Run("check pruning", func(t *testing.T) {
+       fs, bs, err := NewEphemeralBlobStorageWithFs(t)
+       require.NoError(t, err)
+
+       // Slot in first half of epoch therefore should not prune
+       bs.pruner.try(testSidecars[0].BlockRoot(), testSidecars[0].Slot())
+       err = bs.Save(testSidecars[0])
+       require.NoError(t, err)
+       actual, err := bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index)
+       require.NoError(t, err)
+       require.DeepSSZEqual(t, testSidecars[0], actual)
+       err = pollUntil(t, fs, 1)
+       require.NoError(t, err)
+
+       _, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 33, fieldparams.MaxBlobsPerBlock)
+       testSidecars1, err := verification.BlobSidecarSliceNoop(sidecars)
+       require.NoError(t, err)
+       // Slot in first half of epoch therefore should not prune
+       bs.pruner.try(testSidecars1[0].BlockRoot(), testSidecars1[0].Slot())
+       err = bs.Save(testSidecars1[0])
+       require.NoError(t, err)
+       // Check previous saved sidecar was not pruned
+       actual, err = bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index)
+       require.NoError(t, err)
+       require.DeepSSZEqual(t, testSidecars[0], actual)
+       // Check latest sidecar exists
+       actual, err = bs.Get(testSidecars1[0].BlockRoot(), testSidecars1[0].Index)
+       require.NoError(t, err)
+       require.DeepSSZEqual(t, testSidecars1[0], actual)
+       err = pollUntil(t, fs, 2) // Check correct number of files
+       require.NoError(t, err)
+
+       _, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 131187, fieldparams.MaxBlobsPerBlock)
+       testSidecars2, err := verification.BlobSidecarSliceNoop(sidecars)
+       // Slot in second half of epoch therefore should prune
+       bs.pruner.try(testSidecars2[0].BlockRoot(), testSidecars2[0].Slot())
+       require.NoError(t, err)
+       err = bs.Save(testSidecars2[0])
+       require.NoError(t, err)
+       err = pollUntil(t, fs, 3)
+       require.NoError(t, err)
+   })
}

+// pollUntil polls a condition function until it returns true or a timeout is reached.
+func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
+   var remainingFolders []os.FileInfo
+   var err error
+   // Define the condition function for polling
+   conditionFunc := func() bool {
+       remainingFolders, err = afero.ReadDir(fs, ".")
+       require.NoError(t, err)
+       return len(remainingFolders) == expected
+   }
+
+   startTime := time.Now()
+   for {
+       if conditionFunc() {
+           break // Condition met, exit the loop
+       }
+       if time.Since(startTime) > 30*time.Second {
+           return errors.New("timeout")
+       }
+       time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
+   }
+   require.Equal(t, expected, len(remainingFolders))
+   return nil
+}

func TestBlobIndicesBounds(t *testing.T) {
@@ -118,7 +187,7 @@ func TestBlobStoragePrune(t *testing.T) {
        require.NoError(t, bs.Save(sidecar))
    }

-   require.NoError(t, bs.Prune(currentSlot))
+   require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.retain))

    remainingFolders, err := afero.ReadDir(fs, ".")
    require.NoError(t, err)
@@ -138,7 +207,7 @@ func TestBlobStoragePrune(t *testing.T) {
            slot += 10000
        }

-       require.NoError(t, bs.Prune(currentSlot))
+       require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.retain))

        remainingFolders, err := afero.ReadDir(fs, ".")
        require.NoError(t, err)
@@ -167,41 +236,11 @@ func BenchmarkPruning(b *testing.B) {
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
-       err := bs.Prune(currentSlot)
+       err := bs.pruner.prune(currentSlot)
        require.NoError(b, err)
    }
}

-func TestBlobStorageDelete(t *testing.T) {
-   fs, bs, err := NewEphemeralBlobStorageWithFs(t)
-   require.NoError(t, err)
-   rawRoot := "0xcf9bb70c98f58092c9d6459227c9765f984d240be9690e85179bc5a6f60366ad"
-   blockRoot, err := hexutil.Decode(rawRoot)
-   require.NoError(t, err)
-
-   _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
-   testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
-   require.NoError(t, err)
-   for _, sidecar := range testSidecars {
-       require.NoError(t, bs.Save(sidecar))
-   }
-
-   exists, err := afero.DirExists(fs, hexutil.Encode(blockRoot))
-   require.NoError(t, err)
-   require.Equal(t, true, exists)
-
-   // Delete the directory corresponding to the block root
-   require.NoError(t, bs.Delete(bytesutil.ToBytes32(blockRoot)))
-
-   // Ensure that the directory no longer exists after deletion
-   exists, err = afero.DirExists(fs, hexutil.Encode(blockRoot))
-   require.NoError(t, err)
-   require.Equal(t, false, exists)
-
-   // Deleting a non-existent root does not return an error.
-   require.NoError(t, bs.Delete(bytesutil.ToBytes32([]byte{0x1})))
-}

func TestNewBlobStorage(t *testing.T) {
    _, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
    require.NoError(t, err)

@@ -10,16 +10,24 @@ import (
// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
-func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
-   return &BlobStorage{fs: afero.NewMemMapFs()}
+func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
+   fs := afero.NewMemMapFs()
+   pruner, err := newblobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
+   if err != nil {
+       t.Fatal("test setup issue", err)
+   }
+   return &BlobStorage{fs: fs, pruner: pruner}
}

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
-func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage, error) {
+func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
    fs := afero.NewMemMapFs()
-   retentionEpoch := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
-   return fs, &BlobStorage{fs: fs, retentionEpochs: retentionEpoch}, nil
+   pruner, err := newblobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
+   if err != nil {
+       t.Fatal("test setup issue", err)
+   }
+   return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
}

type BlobMocker struct {

66  beacon-chain/db/filesystem/metrics.go  Normal file
@@ -0,0 +1,66 @@
package filesystem

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/spf13/afero"
)

var (
    blobBuckets     = []float64{0.00003, 0.00005, 0.00007, 0.00009, 0.00011, 0.00013, 0.00015}
    blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
        Name:    "blob_storage_save_latency",
        Help:    "Latency of blob storage save operations in seconds",
        Buckets: blobBuckets,
    })
    blobFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
        Name:    "blob_storage_get_latency",
        Help:    "Latency of blob storage get operations in seconds",
        Buckets: blobBuckets,
    })
    blobsPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
        Name: "blob_pruned_blobs_total",
        Help: "Total number of pruned blobs.",
    })
    blobsTotalGauge = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "blobs_on_disk_total",
        Help: "Total number of blobs in filesystem.",
    })
)

func (bs *BlobStorage) Initialize() error {
    if err := bs.collectTotalBlobMetric(); err != nil {
        return fmt.Errorf("failed to initialize blob metrics: %w", err)
    }
    return nil
}

// collectTotalBlobMetric sets the number of blobs currently present in the filesystem
// to the blobsTotalGauge metric.
func (bs *BlobStorage) collectTotalBlobMetric() error {
    totalBlobs := 0
    folders, err := afero.ReadDir(bs.fs, ".")
    if err != nil {
        return err
    }
    for _, folder := range folders {
        num, err := bs.countFiles(folder.Name())
        if err != nil {
            return err
        }
        totalBlobs = totalBlobs + num
    }
    blobsTotalGauge.Set(float64(totalBlobs))
    return nil
}

// countFiles returns the number of blob files in a given directory.
func (bs *BlobStorage) countFiles(folderName string) (int, error) {
    files, err := afero.ReadDir(bs.fs, folderName)
    if err != nil {
        return 0, err
    }
    return len(files), nil
}
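metrics.go relies on promauto, which registers each collector with the default Prometheus registry at construction time, so the package-level vars above are usable with no explicit MustRegister call. A standalone sketch of that pattern (the metric name here is illustrative, not one of Prysm's):

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// promauto registers the histogram with the default registry as a side
// effect of construction, mirroring how blobSaveLatency is declared above.
var opLatency = promauto.NewHistogram(prometheus.HistogramOpts{
    Name:    "example_op_latency_seconds",
    Help:    "Latency of example operations in seconds",
    Buckets: []float64{0.00003, 0.00005, 0.00007},
})

func timedOp() {
    // Same defer-and-Observe shape as Save and Get in blob.go.
    start := time.Now()
    defer func() { opLatency.Observe(time.Since(start).Seconds()) }()
    // ... do work ...
}

func main() {
    timedOp()
    fmt.Println("observed one sample")
}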
248  beacon-chain/db/filesystem/pruner.go  Normal file
@@ -0,0 +1,248 @@
package filesystem

import (
    "encoding/binary"
    "io"
    "path/filepath"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/pkg/errors"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/afero"
)

const retentionBuffer primitives.Epoch = 2

var (
    errBlobSlotUnknown = errors.New("could not determine blob slot from files in storage")
    errPruningFailures = errors.New("blobs could not be pruned for some roots")
)

type blobPruner struct {
    sync.Mutex
    slotMap      *slotForRoot
    retain       primitives.Slot
    fs           afero.Fs
    prunedBefore atomic.Uint64
}

func newblobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
    r, err := slots.EpochStart(retain + retentionBuffer)
    if err != nil {
        return nil, errors.Wrap(err, "could not set retentionSlots")
    }
    return &blobPruner{fs: fs, retain: r, slotMap: newSlotForRoot()}, nil
}

// try checks whether we should prune and then calls prune in a goroutine.
func (p *blobPruner) try(root [32]byte, latest primitives.Slot) {
    p.slotMap.ensure(rootString(root), latest)
    pruned := uint64(pruneBefore(latest, p.retain))
    if p.prunedBefore.Swap(pruned) == pruned {
        return
    }
    go func() {
        if err := p.prune(primitives.Slot(pruned)); err != nil {
            log.WithError(err).Errorf("failed to prune blobs from slot %d", latest)
        }
    }()
}

func pruneBefore(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
    // Safely compute the first slot in the epoch for the latest slot
    latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
    if latest < offset {
        return 0
    }
    return latest - offset
}

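The interesting detail in try is the prunedBefore.Swap call: pruneBefore floors the latest slot to its epoch start and subtracts the retention offset, and Swap both stores that target and returns the previous one, so only the caller that actually moves the target launches the pruning goroutine; every other Save in the same epoch returns early. A small sketch of that dedupe trick (type and method names are illustrative):

package main

import (
    "fmt"
    "sync/atomic"
)

// dedupe sketches the prunedBefore.Swap trick: Swap stores the new
// target and returns the previous one, so only a caller that actually
// changes the target proceeds. Concurrent callers with the same target
// see old == new and skip the expensive work.
type dedupe struct{ last atomic.Uint64 }

func (d *dedupe) shouldPrune(target uint64) bool {
    return d.last.Swap(target) != target
}

func main() {
    var d dedupe
    fmt.Println(d.shouldPrune(96))  // true: target moved from 0 to 96
    fmt.Println(d.shouldPrune(96))  // false: same epoch boundary, skip
    fmt.Println(d.shouldPrune(128)) // true: new epoch boundary
}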
// prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
    log.Debug("Pruning old blobs")
    start := time.Now()
    totalPruned, totalErr := 0, 0
    defer func() {
        log.WithFields(log.Fields{
            "lastPrunedEpoch":   slots.ToEpoch(pruneBefore),
            "pruneTime":         time.Since(start).String(),
            "numberBlobsPruned": totalPruned,
        }).Debug("Pruned old blobs")
        blobsPrunedCounter.Add(float64(totalPruned))
    }()

    dirs, err := p.listDir(".", filterRoot)
    if err != nil {
        return errors.Wrap(err, "unable to list root blobs directory")
    }
    for _, dir := range dirs {
        pruned, err := p.tryPruneDir(dir, pruneBefore)
        if err != nil {
            totalErr += 1
            log.WithError(err).WithField("directory", dir).Error("unable to prune directory")
        }
        totalPruned += pruned
    }

    if totalErr > 0 {
        return errors.Wrapf(errPruningFailures, "pruning failed for %d root directories", totalErr)
    }
    return nil
}

// directoryMeta tries a few different ways to determine the slot for the given directory.
// The second return value will be nil if the function did not need to list the directory, or
// non-nil with a list of files if it did.
func (p *blobPruner) directoryMeta(dir string) (primitives.Slot, []string, error) {
    root := filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
    // First try the cheap map lookup.
    slot, ok := p.slotMap.slot(root)
    if ok {
        return slot, nil, nil
    }

    // Next try constructing the path to the zero index blob, which will always be present unless
    // the blob directory has been damaged by something like a restart during RemoveAll.
    slot, err := slotFromFile(filepath.Join(dir, "0."+sszExt), p.fs)
    if err == nil {
        p.slotMap.ensure(root, slot)
        return slot, nil, nil
    }

    // Fall back if getting the slot from index zero failed -- look for any ssz file.
    files, err := p.listDir(dir, filterSsz)
    if err != nil {
        return 0, nil, errors.Wrapf(err, "failed to list blobs in directory %s", dir)
    }
    if len(files) == 0 {
        return 0, files, errors.Wrapf(errBlobSlotUnknown, "contained no blob files")
    }
    slot, err = slotFromFile(files[0], p.fs)
    if err != nil {
        return 0, nil, errors.Wrapf(err, "slot could not be read from blob file %s", files[0])
    }
    p.slotMap.ensure(root, slot)
    return slot, files, nil
}

// tryPruneDir will delete the directory of blobs if the blob slot is outside the
// retention period. We determine the slot by looking at the first blob in the directory.
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
    slot, files, err := p.directoryMeta(dir)
    if err != nil {
        return 0, errors.Wrapf(err, "could not determine slot for directory %s", dir)
    }
    if slot >= pruneBefore {
        return 0, nil
    }

    if len(files) == 0 {
        files, err = p.listDir(dir, filterSsz)
        if err != nil {
            return 0, errors.Wrapf(err, "failed to list blobs in directory %s", dir)
        }
    }
    if err = p.fs.RemoveAll(dir); err != nil {
        return 0, errors.Wrapf(err, "failed to delete blobs in %s", dir)
    }
    return len(files), nil
}

func slotFromFile(file string, fs afero.Fs) (primitives.Slot, error) {
    f, err := fs.Open(file)
    if err != nil {
        return 0, err
    }
    defer func() {
        if err := f.Close(); err != nil {
            log.WithError(err).Errorf("Could not close blob file")
        }
    }()
    return slotFromBlob(f)
}

// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes),
// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields
// preceding the slot information within SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
    b := make([]byte, 8)
    _, err := at.ReadAt(b, 131176)
    if err != nil {
        return 0, err
    }
    rawSlot := binary.LittleEndian.Uint64(b)
    return primitives.Slot(rawSlot), nil
}

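The 131176 offset in slotFromBlob is the sum of the fixed-size BlobSidecar fields that precede the signed block header in the SSZ encoding: an 8-byte index, the 131072-byte blob, and two 48-byte KZG values. Because everything before the header is fixed-size, and the header's first field is the slot, the slot can be read without decoding the whole sidecar. A runnable check of that arithmetic against a synthetic buffer (field sizes are taken from the comment above; the demo data is fabricated):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// The fixed-size fields that precede the signed block header in a
// Deneb BlobSidecar's SSZ encoding.
const (
    indexSize      = 8      // uint64 blob index
    blobSize       = 131072 // blob payload bytes
    commitmentSize = 48     // KZG commitment
    proofSize      = 48     // KZG proof

    slotOffset = indexSize + blobSize + commitmentSize + proofSize // 131176
)

func main() {
    // Build a synthetic buffer with slot 12345 encoded at the computed
    // offset, then read it back the way slotFromBlob does.
    buf := make([]byte, slotOffset+8)
    binary.LittleEndian.PutUint64(buf[slotOffset:], 12345)

    b := make([]byte, 8)
    if _, err := bytes.NewReader(buf).ReadAt(b, slotOffset); err != nil {
        panic(err)
    }
    fmt.Println(binary.LittleEndian.Uint64(b)) // 12345
}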
func (p *blobPruner) listDir(dir string, filter func(string) bool) ([]string, error) {
    top, err := p.fs.Open(dir)
    defer func() {
        if err := top.Close(); err != nil {
            log.WithError(err).Errorf("Could not close file %s", dir)
        }
    }()
    if err != nil {
        return nil, errors.Wrap(err, "failed to open directory descriptor")
    }
    dirs, err := top.Readdirnames(-1)
    if err != nil {
        return nil, errors.Wrap(err, "failed to read directory listing")
    }
    if filter != nil {
        filtered := make([]string, 0, len(dirs))
        for i := range dirs {
            if filter(dirs[i]) {
                filtered = append(filtered, dirs[i])
            }
        }
        return filtered, nil
    }
    return dirs, nil
}

func filterRoot(s string) bool {
    return strings.HasPrefix(s, "0x")
}

func filterSsz(s string) bool {
    return filepath.Ext(s) == sszExt
}

func newSlotForRoot() *slotForRoot {
    return &slotForRoot{
        cache: make(map[string]primitives.Slot, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
    }
}

type slotForRoot struct {
    sync.RWMutex
    cache map[string]primitives.Slot
}

func (s *slotForRoot) ensure(key string, slot primitives.Slot) {
    s.Lock()
    defer s.Unlock()
    s.cache[key] = slot
}

func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
    s.RLock()
    defer s.RUnlock()
    slot, ok := s.cache[key]
    return slot, ok
}

func (s *slotForRoot) evict(key string) {
    s.Lock()
    defer s.Unlock()
    delete(s.cache, key)
}
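slotForRoot is a plain RWMutex-guarded map keyed by the hex-encoded root, pre-sized for roughly one entry per slot of the retention window; Save populates it through ensure so the pruner's directoryMeta can usually avoid touching disk. A quick usage sketch of the same cache shape (uint64 stands in for primitives.Slot):

package main

import (
    "fmt"
    "sync"
)

// slotCache mirrors slotForRoot: an RWMutex-guarded map lets concurrent
// readers (prune scans) share the lock while writers (Save calls) take
// it exclusively.
type slotCache struct {
    sync.RWMutex
    cache map[string]uint64
}

func (s *slotCache) ensure(key string, slot uint64) {
    s.Lock()
    defer s.Unlock()
    s.cache[key] = slot
}

func (s *slotCache) slot(key string) (uint64, bool) {
    s.RLock()
    defer s.RUnlock()
    v, ok := s.cache[key]
    return v, ok
}

func main() {
    c := &slotCache{cache: make(map[string]uint64)}
    c.ensure("0xabc123", 131187)
    if v, ok := c.slot("0xabc123"); ok {
        fmt.Println("cached slot:", v)
    }
}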
@@ -58,7 +58,6 @@ type ReadOnlyDatabase interface {
    // Blob operations.
    BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
    BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)

    // origin checkpoint sync support
    OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
    BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -95,7 +94,6 @@ type NoHeadAccessDatabase interface {
    SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error

    // Blob operations.
    SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.DeprecatedBlobSidecar) error
    DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error

    CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error

@@ -110,6 +110,7 @@ go_test(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/testing:go_default_library",
        "//testing/assert:go_default_library",

@@ -11,7 +11,7 @@ import (

// LastArchivedSlot from the db.
func (s *Store) LastArchivedSlot(ctx context.Context) (primitives.Slot, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
    defer span.End()
    var index primitives.Slot
    err := s.db.View(func(tx *bolt.Tx) error {
@@ -26,7 +26,7 @@ func (s *Store) LastArchivedSlot(ctx context.Context) (primitives.Slot, error) {

// LastArchivedRoot from the db.
func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
    defer span.End()

    var blockRoot []byte
@@ -44,7 +44,7 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
// ArchivedPointRoot returns the block root of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32]byte {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
    defer span.End()

    var blockRoot []byte
@@ -61,7 +61,7 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32

// HasArchivedPoint returns true if an archived point exists in DB.
func (s *Store) HasArchivedPoint(ctx context.Context, slot primitives.Slot) bool {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
+   _, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
    defer span.End()
    var exists bool
    if err := s.db.View(func(tx *bolt.Tx) error {

@@ -67,7 +67,7 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlob
    var prune []blobRotatingKey
    return s.db.Update(func(tx *bolt.Tx) error {
        var existing []byte
-       sc := &ethpb.BlobSidecars{}
+       sc := &ethpb.DeprecatedBlobSidecars{}
        bkt := tx.Bucket(blobsBucket)
        c := bkt.Cursor()
        for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
@@ -198,7 +198,7 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
    if enc == nil {
        return nil, ErrNotFound
    }
-   sc := &ethpb.BlobSidecars{}
+   sc := &ethpb.DeprecatedBlobSidecars{}
    if err := decode(ctx, enc, sc); err != nil {
        return nil, err
    }
@@ -206,7 +206,7 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
    return filterForIndices(sc, indices...)
}

-func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
+func filterForIndices(sc *ethpb.DeprecatedBlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
    if len(indices) == 0 {
        return sc.Sidecars, nil
    }
@@ -252,7 +252,7 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
    if enc == nil {
        return nil, ErrNotFound
    }
-   sc := &ethpb.BlobSidecars{}
+   sc := &ethpb.DeprecatedBlobSidecars{}
    if err := decode(ctx, enc, sc); err != nil {
        return nil, err
    }

@@ -205,7 +205,7 @@ func TestStore_BlobSidecars(t *testing.T) {
        require.NoError(t, equalBlobSlices(scs, got))

        newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
-       newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
+       newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
        for _, sc := range newScs {
            sc.Slot = sc.Slot + newRetentionSlot
        }
@@ -229,7 +229,7 @@ func TestStore_BlobSidecars(t *testing.T) {
        require.NoError(t, equalBlobSlices(scs, got))

        scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
-       newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
+       newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
        for _, sc := range scs {
            sc.Slot = sc.Slot + newRetentionSlot
        }
@@ -255,7 +255,7 @@ func TestStore_BlobSidecars(t *testing.T) {
        require.NoError(t, equalBlobSlices(scs, got))

        scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
-       newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
+       newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
        for _, sc := range scs {
            sc.Slot = sc.Slot + newRetentionSlot
        }
@@ -281,7 +281,7 @@ func TestStore_BlobSidecars(t *testing.T) {
        require.NoError(t, equalBlobSlices(scs, got))

        scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
-       newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
+       newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
        for _, sc := range scs {
            sc.Slot = sc.Slot + newRetentionSlot
        }
@@ -490,7 +490,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
            {BlockRoot: r, Slot: primitives.Slot(i)},
        }
        k := s.blobSidecarKey(scs[0])
-       encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
+       encodedBlobSidecar, err := encode(ctx, &ethpb.DeprecatedBlobSidecars{Sidecars: scs})
        require.NoError(b, err)
        require.NoError(b, bkt.Put(k, encodedBlobSidecar))
    }

@@ -53,7 +53,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadO
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
    defer span.End()

    var root [32]byte
@@ -72,7 +72,7 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)

// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
    defer span.End()

    var root [32]byte
@@ -168,7 +168,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b

// HasBlock checks if a block by root exists in the db.
func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
+   _, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
    defer span.End()
    if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
        return true
@@ -379,7 +379,7 @@ func (s *Store) GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeac
}

func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
    defer span.End()
    var root [32]byte
    err := s.db.View(func(tx *bolt.Tx) error {
@@ -396,7 +396,7 @@ func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {

// SaveGenesisBlockRoot to the db.
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
    defer span.End()
    return s.db.Update(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(blocksBucket)
@@ -409,7 +409,7 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
    defer span.End()
    return s.db.Update(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(blocksBucket)
@@ -420,7 +420,7 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
    defer span.End()
    return s.db.Update(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(blocksBucket)
@@ -519,7 +519,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, feeRecipients []common.Address) error {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
+   _, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
    defer span.End()

    if len(ids) != len(feeRecipients) {
@@ -644,7 +644,7 @@ func blockRootsBySlotRange(
    bkt *bolt.Bucket,
    startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
+   _, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
    defer span.End()

    // Return nothing when all slot parameters are missing
@@ -709,7 +709,7 @@ func blockRootsBySlotRange(

// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([][32]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
+   _, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
    defer span.End()

    bkt := tx.Bucket(blockSlotIndicesBucket)
@@ -730,7 +730,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
+   _, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
    defer span.End()
    indicesByBucket := make(map[string][]byte)
    // Every index has a unique bucket for fast, binary-search
@@ -758,7 +758,7 @@ func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyB
// objects. If a certain filter criterion does not apply to
// blocks, an appropriate error is returned.
func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter) (map[string][]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
+   _, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
    defer span.End()
    indicesByBucket := make(map[string][]byte)
    for k, v := range f.Filters() {

@@ -12,7 +12,7 @@ import (
// DepositContractAddress returns contract address is the address of
// the deposit contract on the proof of work chain.
func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
+   _, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
    defer span.End()
    var addr []byte
    if err := s.db.View(func(tx *bolt.Tx) error {
@@ -27,7 +27,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {

// SaveDepositContractAddress to the db. It returns an error if an address has been previously saved.
func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
-   ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
+   _, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
    defer span.End()

    return s.db.Update(func(tx *bolt.Tx) error {

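The database hunks above all make one mechanical change: where a traced function never passes the context onward, the derived context returned by trace.StartSpan is bound to _ instead of shadowing ctx, making it explicit that only the span is used. A minimal sketch, assuming the opencensus trace package Prysm uses elsewhere:

package main

import (
    "context"
    "fmt"

    "go.opencensus.io/trace"
)

// readOnly shows the adopted pattern: StartSpan still returns a derived
// context, but when nothing downstream consumes it, binding it to _
// avoids a misleading ctx reassignment.
func readOnly(ctx context.Context) {
    _, span := trace.StartSpan(ctx, "Example.ReadOnly")
    defer span.End()
    // ... work that never passes ctx further ...
}

func main() {
    readOnly(context.Background())
    fmt.Println("done")
}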
Some files were not shown because too many files have changed in this diff.